hadoop git commit: HADOOP-15629. Missing trimming in readlink in case of protocol. Contributed by Giovanni Matteo Fumarola

2018-07-26 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-15461 bac459b3f -> d71df5aac


HADOOP-15629. Missing trimming in readlink in case of protocol. Contributed by 
Giovanni Matteo Fumarola


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d71df5aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d71df5aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d71df5aa

Branch: refs/heads/HADOOP-15461
Commit: d71df5aac9c7df9303cef7a5ba740bfe20958374
Parents: bac459b
Author: Botong Huang 
Authored: Thu Jul 26 21:35:13 2018 -0700
Committer: Botong Huang 
Committed: Thu Jul 26 21:35:13 2018 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 21 +++-
 1 file changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d71df5aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index bf3feb5..ab3d913 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -30,7 +30,10 @@ import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.net.InetAddress;
+import java.net.MalformedURLException;
 import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
 import java.net.UnknownHostException;
 import java.nio.charset.Charset;
 import java.nio.file.AccessDeniedException;
@@ -203,18 +206,26 @@ public class FileUtil {
   return "";
 }
 
-if (Files.isSymbolicLink(f.toPath())) {
+// This will make sure we remove the protocol as file://
+java.nio.file.Path pathFile;
+try {
+  pathFile = Paths.get(new URL(f.toString()).toURI());
+} catch (MalformedURLException | URISyntaxException e) {
+  pathFile = f.toPath();
+}
+
+if (Files.isSymbolicLink(pathFile)) {
   java.nio.file.Path p = null;
   try {
-p = Files.readSymbolicLink(f.toPath());
+p = Files.readSymbolicLink(pathFile);
   } catch (Exception e) {
-LOG.warn("Exception while reading the symbolic link "
-+ f.getAbsolutePath() + ". Exception= " + e.getMessage());
+LOG.warn("Exception while reading the symbolic link {}. Exception= {}",
+f, e.getMessage());
 return "";
   }
   return p.toAbsolutePath().toString();
 }
-LOG.warn("The file " + f.getAbsolutePath() + " is not a symbolic link.");
+LOG.warn("The file {} is not a symbolic link.", f);
 return "";
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



svn commit: r1836770 - /hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

2018-07-26 Thread tasanuma
Author: tasanuma
Date: Fri Jul 27 01:12:48 2018
New Revision: 1836770

URL: http://svn.apache.org/viewvc?rev=1836770&view=rev
Log:
Add Takanobu to the Committer list

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1836770&r1=1836769&r2=1836770&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Fri 
Jul 27 01:12:48 2018
@@ -1516,6 +1516,14 @@

 

+ tasanuma
+ http://people.apache.org/~tasanuma;>Takanobu 
Asanuma
+ Yahoo! JAPAN
+ 
+ +9
+   
+
+   
  taton
  http://people.apache.org/~taton;>Christophe 
Taton
  INRIA



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-8429. Improve diagnostic message when artifact is not set properly. Contributed by Gour Saha

2018-07-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/remotes/origin/branch-3.1 a869bd970 -> 6b1a2d811


YARN-8429. Improve diagnostic message when artifact is not set properly.
   Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b1a2d81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b1a2d81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b1a2d81

Branch: refs/remotes/origin/branch-3.1
Commit: 6b1a2d811be44712153dd3577f535e8afa66aa34
Parents: a869bd9
Author: Eric Yang 
Authored: Thu Jul 26 20:02:13 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 20:04:17 2018 -0400

--
 .../exceptions/RestApiErrorMessages.java|  6 +-
 .../provider/AbstractClientProvider.java| 14 ++---
 .../defaultImpl/DefaultClientProvider.java  | 22 ---
 .../provider/docker/DockerClientProvider.java   | 15 ++---
 .../provider/tarball/TarballClientProvider.java | 27 
 .../yarn/service/utils/ServiceApiUtil.java  |  4 +-
 .../hadoop/yarn/service/TestServiceApiUtil.java |  9 ++-
 .../providers/TestAbstractClientProvider.java   | 29 -
 .../providers/TestDefaultClientProvider.java| 66 
 9 files changed, 138 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b1a2d81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 5b3c72c..f10d884 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -50,6 +50,10 @@ public interface RestApiErrorMessages {
   "Artifact id (like docker image name) is either empty or not provided";
   String ERROR_ARTIFACT_ID_FOR_COMP_INVALID =
   ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_ARTIFACT_PATH_FOR_COMP_INVALID = "For component %s with %s "
+  + "artifact, path does not exist: %s";
+  String ERROR_CONFIGFILE_DEST_FILE_FOR_COMP_NOT_ABSOLUTE = "For component %s "
+  + "with %s artifact, dest_file must be a relative path: %s";
 
   String ERROR_RESOURCE_INVALID = "Resource is not provided";
   String ERROR_RESOURCE_FOR_COMP_INVALID =
@@ -89,7 +93,7 @@ public interface RestApiErrorMessages {
   String ERROR_ABSENT_NUM_OF_INSTANCE =
   "Num of instances should appear either globally or per component";
   String ERROR_ABSENT_LAUNCH_COMMAND =
-  "Launch_command is required when type is not DOCKER";
+  "launch_command is required when type is not DOCKER";
 
   String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
   + " component level, needs corresponding values set at service level";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b1a2d81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
index d16c698..0145cee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
@@ -68,18 +68,18 @@ public abstract class AbstractClientProvider {
* Validate the artifact.
* @param artifact
*/
-  public abstract void validateArtifact(Artifact 

[45/50] hadoop git commit: YARN-8577. Fix the broken anchor in SLS site-doc. Contributed by Weiwei Yang.

2018-07-26 Thread eyang
YARN-8577. Fix the broken anchor in SLS site-doc. Contributed by Weiwei Yang.

(cherry picked from commit 3d3158cea4580eb2e3b2af635c3a7d30f4dbb873)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2212c20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2212c20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2212c20

Branch: refs/remotes/origin/branch-3.1
Commit: d2212c20c56944b974d10609f73387fd44df6ce1
Parents: 8e65057
Author: bibinchundatt 
Authored: Wed Jul 25 16:19:14 2018 +0530
Committer: bibinchundatt 
Committed: Wed Jul 25 19:00:12 2018 +0530

--
 .../hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2212c20/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
--
diff --git 
a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md 
b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
index 9df4998..e487267 100644
--- a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
+++ b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
@@ -27,7 +27,7 @@ YARN Scheduler Load Simulator (SLS)
 * [Metrics](#Metrics)
 * [Real-time Tracking](#Real-time_Tracking)
 * [Offline Analysis](#Offline_Analysis)
-* [Synthetic Load Generator](#SynthGen)
+* [Synthetic Load Generator](#Synthetic_Load_Generator)
 * [Appendix](#Appendix)
 * [Resources](#Resources)
 * [SLS JSON input file format](#SLS_JSON_input_file_format)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] hadoop git commit: HDFS-13524. Occasional "All datanodes are bad" error in TestLargeBlock#testLargeBlockSize. Contributed by Siyao Meng.

2018-07-26 Thread eyang
HDFS-13524. Occasional "All datanodes are bad" error in 
TestLargeBlock#testLargeBlockSize. Contributed by Siyao Meng.

(cherry picked from commit 88b2794244d19b6432253eb649a375e5bcdcf964)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac9155d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac9155d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac9155d6

Branch: refs/remotes/origin/branch-3.1
Commit: ac9155d6cb174ba19a8cf64b7d0142e147248e04
Parents: 4898edf
Author: Wei-Chiu Chuang 
Authored: Mon Jul 16 10:51:23 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Jul 16 10:52:56 2018 -0700

--
 .../src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java   | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac9155d6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index a37da35..ec7a077 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -50,6 +50,7 @@ public class TestLargeBlock {
   // should we verify the data read back from the file? (slow)
   static final boolean verifyData = true;
   static final byte[] pattern = { 'D', 'E', 'A', 'D', 'B', 'E', 'E', 'F'};
+  static final int numDatanodes = 3;
 
   // creates a file 
   static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl,
@@ -158,7 +159,7 @@ public class TestLargeBlock {
* timeout here.
* @throws IOException in case of errors
*/
-  @Test (timeout = 90)
+  @Test (timeout = 180)
   public void testLargeBlockSize() throws IOException {
 final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
 runTest(blockSize);
@@ -175,7 +176,8 @@ public class TestLargeBlock {
 final long fileSize = blockSize + 1L;
 
 Configuration conf = new Configuration();
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(numDatanodes).build();
 FileSystem fs = cluster.getFileSystem();
 try {
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/50] hadoop git commit: HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.

2018-07-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/remotes/origin/branch-3.1 6f31faf92 -> a869bd970


HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed 
by Kitti Nansi.

(cherry picked from commit eecb5ba54599aeae758abd4007e55e5b531f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/242b5acd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/242b5acd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/242b5acd

Branch: refs/remotes/origin/branch-3.1
Commit: 242b5acdb358d886b6cc23707bc4e7b484ff514e
Parents: e994c4f
Author: Andrew Wang 
Authored: Mon Jul 9 15:17:21 2018 +0200
Committer: Andrew Wang 
Committed: Mon Jul 9 15:17:34 2018 +0200

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 13 +
 1 file changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/242b5acd/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d17020d..c092bff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1286,11 +1286,10 @@
   dfs.image.transfer.timeout
   6
   
-Socket timeout for image transfer in milliseconds. This timeout and 
the related
-dfs.image.transfer.bandwidthPerSec parameter should be configured such
-that normal image transfer can complete successfully.
-This timeout prevents client hangs when the sender fails during
-image transfer. This is socket timeout during image transfer.
+Socket timeout for the HttpURLConnection instance used in the image
+transfer. This is measured in milliseconds.
+This timeout prevents client hangs if the connection is idle
+for this configured timeout, during image transfer.
   
 
 
@@ -1301,9 +1300,7 @@
 Maximum bandwidth used for regular image transfers (instead of
 bootstrapping the standby namenode), in bytes per second.
 This can help keep normal namenode operations responsive during
-checkpointing. The maximum bandwidth and timeout in
-dfs.image.transfer.timeout should be set such that normal image
-transfers can complete successfully.
+checkpointing.
 A default value of 0 indicates that throttling is disabled.
 The maximum bandwidth used for bootstrapping standby namenode is
 configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] hadoop git commit: YARN-8541. RM startup failure on recovery after user deletion. Contributed by Bibin A Chundatt.

2018-07-26 Thread eyang
YARN-8541. RM startup failure on recovery after user deletion. Contributed by 
Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e65057e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e65057e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e65057e

Branch: refs/remotes/origin/branch-3.1
Commit: 8e65057eb10d03db08781da7a5ad8855155883ed
Parents: b89624a
Author: bibinchundatt 
Authored: Wed Jul 25 15:54:32 2018 +0530
Committer: bibinchundatt 
Committed: Wed Jul 25 15:54:32 2018 +0530

--
 .../server/resourcemanager/RMAppManager.java| 48 ++--
 .../placement/PlacementManager.java |  9 
 .../TestWorkPreservingRMRestart.java| 48 
 3 files changed, 72 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e65057e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 3e64cfc..7011aaa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -364,17 +364,9 @@ public class RMAppManager implements 
EventHandler,
   ApplicationSubmissionContext submissionContext, long submitTime,
   String user, boolean isRecovery, long startTime) throws YarnException {
 
-ApplicationPlacementContext placementContext = null;
-try {
-  placementContext = placeApplication(rmContext, submissionContext, user);
-} catch (YarnException e) {
-  String msg =
-  "Failed to place application " + submissionContext.getApplicationId()
-  + " to queue and specified " + "queue is invalid : "
-  + submissionContext.getQueue();
-  LOG.error(msg, e);
-  throw e;
-}
+ApplicationPlacementContext placementContext =
+placeApplication(rmContext.getQueuePlacementManager(),
+submissionContext, user, isRecovery);
 
 // We only replace the queue when it's a new application
 if (!isRecovery) {
@@ -789,23 +781,31 @@ public class RMAppManager implements 
EventHandler,
   }
 
   @VisibleForTesting
-  ApplicationPlacementContext placeApplication(RMContext rmContext,
-  ApplicationSubmissionContext context, String user) throws YarnException {
+  ApplicationPlacementContext placeApplication(
+  PlacementManager placementManager, ApplicationSubmissionContext context,
+  String user, boolean isRecovery) throws YarnException {
 ApplicationPlacementContext placementContext = null;
-PlacementManager placementManager = rmContext.getQueuePlacementManager();
-
 if (placementManager != null) {
-  placementContext = placementManager.placeApplication(context, user);
-} else{
-  if ( context.getQueue() == null || context.getQueue().isEmpty()) {
-final String msg = "Queue Placement Manager is not set. Cannot place "
-+ "application : " + context.getApplicationId() + " to queue and "
-+ "specified queue is invalid " + context.getQueue();
-LOG.error(msg);
-throw new YarnException(msg);
+  try {
+placementContext = placementManager.placeApplication(context, user);
+  } catch (YarnException e) {
+// Placement could also fail if the user doesn't exist in system
+// skip if the user is not found during recovery.
+if (isRecovery) {
+  LOG.warn("PlaceApplication failed,skipping on recovery of rm");
+  return placementContext;
+}
+throw e;
   }
 }
-
+if (placementContext == null && (context.getQueue() == null) || context
+.getQueue().isEmpty()) {
+  String msg = "Failed to place application " + context.getApplicationId()
+  + " to queue and specified " + "queue is invalid : " + context
+  .getQueue();
+  LOG.error(msg);
+  throw new YarnException(msg);
+}
 return placementContext;
   }
 


[22/50] hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-07-26 Thread eyang
YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via 
rkanter)

(cherry picked from commit 1bc106a738a6ce4f7ed025d556bb44c1ede022e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfa71428
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfa71428
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfa71428

Branch: refs/remotes/origin/branch-3.1
Commit: dfa71428ea19835ba84d97f98ca78ec02790a209
Parents: 1c7d916
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:07:48 2018 -0700

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfa71428/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index a199d84..5607823 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,19 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] hadoop git commit: YARN-8512. ATSv2 entities are not published to HBase from second attempt onwards. Contributed by Rohith Sharma K S.

2018-07-26 Thread eyang
YARN-8512. ATSv2 entities are not published to HBase from second attempt 
onwards. Contributed by Rohith Sharma K S.

(cherry picked from commit 7f1d3d0e9dbe328fae0d43421665e0b6907b33fe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b4ead92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b4ead92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b4ead92

Branch: refs/remotes/origin/branch-3.1
Commit: 9b4ead92c811b02bdfc62acf00fc364533eecab8
Parents: 6f10491
Author: Sunil G 
Authored: Wed Jul 11 12:26:32 2018 +0530
Committer: Sunil G 
Committed: Wed Jul 11 12:27:12 2018 +0530

--
 .../containermanager/ContainerManagerImpl.java  |  69 
 .../application/ApplicationImpl.java|   7 +-
 .../BaseContainerManagerTest.java   |  25 +
 .../TestContainerManagerRecovery.java   | 106 +--
 4 files changed, 180 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b4ead92/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 3470910..ad63720 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1102,24 +1102,8 @@ public class ContainerManagerImpl extends 
CompositeService implements
   // Create the application
   // populate the flow context from the launch context if the timeline
   // service v.2 is enabled
-  FlowContext flowContext = null;
-  if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
-String flowName = launchContext.getEnvironment()
-.get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
-String flowVersion = launchContext.getEnvironment()
-.get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
-String flowRunIdStr = launchContext.getEnvironment()
-.get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
-long flowRunId = 0L;
-if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
-  flowRunId = Long.parseLong(flowRunIdStr);
-}
-flowContext = new FlowContext(flowName, flowVersion, flowRunId);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Flow context: " + flowContext
-  + " created for an application " + applicationID);
-}
-  }
+  FlowContext flowContext =
+  getFlowContext(launchContext, applicationID);
 
   Application application =
   new ApplicationImpl(dispatcher, user, flowContext,
@@ -1138,6 +1122,31 @@ public class ContainerManagerImpl extends 
CompositeService implements
 dispatcher.getEventHandler().handle(new ApplicationInitEvent(
 applicationID, appAcls, logAggregationContext));
   }
+} else if (containerTokenIdentifier.getContainerType()
+== ContainerType.APPLICATION_MASTER) {
+  FlowContext flowContext =
+  getFlowContext(launchContext, applicationID);
+  if (flowContext != null) {
+ApplicationImpl application =
+(ApplicationImpl) context.getApplications().get(applicationID);
+
+// update flowContext reference in ApplicationImpl
+application.setFlowContext(flowContext);
+
+// Required to update state store for recovery.
+context.getNMStateStore().storeApplication(applicationID,
+buildAppProto(applicationID, user, credentials,
+container.getLaunchContext().getApplicationACLs(),
+containerTokenIdentifier.getLogAggregationContext(),
+flowContext));
+
+LOG.info(
+"Updated application reference with flowContext " + flowContext
++ " for app " + applicationID);
+  } else {
+LOG.info("TimelineService V2.0 is not enabled. Skipping 

[34/50] hadoop git commit: YARN-8380. Support bind propagation options for mounts in docker runtime. Contributed by Billie Rinaldi

2018-07-26 Thread eyang
YARN-8380.  Support bind propagation options for mounts in docker runtime.
Contributed by Billie Rinaldi

(cherry picked from commit 8688a0c7f88f2adf1a7fce695e06f3dd1f745080)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b8546a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b8546a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b8546a

Branch: refs/remotes/origin/branch-3.1
Commit: 23b8546a802fdd30d5a3382cec136d1b85e34cdf
Parents: e665c0a
Author: Eric Yang 
Authored: Mon Jul 23 20:12:04 2018 -0400
Committer: Eric Yang 
Committed: Mon Jul 23 20:13:41 2018 -0400

--
 .../runtime/DockerLinuxContainerRuntime.java|  37 ++-
 .../linux/runtime/docker/DockerRunCommand.java  |  18 +-
 .../container-executor/impl/utils/docker-util.c | 196 --
 .../test/utils/test_docker_util.cc  | 133 +-
 .../runtime/TestDockerContainerRuntime.java | 259 +--
 .../gpu/TestNvidiaDockerV1CommandPlugin.java|   2 +-
 .../src/site/markdown/DockerContainers.md   |  13 +-
 7 files changed, 349 insertions(+), 309 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b8546a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 5d95875..1793b2e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -157,9 +157,13 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS} allows users to specify
  + additional volume mounts for the Docker container. The value of the
  * environment variable should be a comma-separated list of mounts.
- * All such mounts must be given as {@code source:dest:mode}, and the mode
+ * All such mounts must be given as {@code source:dest[:mode]} and the mode
  * must be "ro" (read-only) or "rw" (read-write) to specify the type of
- * access being requested. The requested mounts will be validated by
+ * access being requested. If neither is specified, read-write will be
+ * assumed. The mode may include a bind propagation option. In that case,
+ * the mode should either be of the form [option], rw+[option], or
+ * ro+[option]. Valid bind propagation options are shared, rshared, slave,
+ * rslave, private, and rprivate. The requested mounts will be validated by
  * container-executor based on the values set in container-executor.cfg for
  * {@code docker.allowed.ro-mounts} and {@code docker.allowed.rw-mounts}.
  *   
@@ -192,7 +196,8 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   private static final Pattern hostnamePattern = Pattern.compile(
   HOSTNAME_PATTERN);
   private static final Pattern USER_MOUNT_PATTERN = Pattern.compile(
-  "(?<=^|,)([^:\\x00]+):([^:\\x00]+):([a-z]+)");
+  "(?<=^|,)([^:\\x00]+):([^:\\x00]+)" +
+  "(:(r[ow]|(r[ow][+])?(r?shared|r?slave|r?private)))?(?:,|$)");
   private static final int HOST_NAME_LENGTH = 64;
   private static final String DEFAULT_PROCFS = "/proc";
 
@@ -844,24 +849,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 + environment.get(ENV_DOCKER_CONTAINER_MOUNTS));
   }
   parsedMounts.reset();
+  long mountCount = 0;
   while (parsedMounts.find()) {
+mountCount++;
 String src = parsedMounts.group(1);
 java.nio.file.Path srcPath = java.nio.file.Paths.get(src);
 if (!srcPath.isAbsolute()) {
   src = mountReadOnlyPath(src, localizedResources);
 }
 String dst = parsedMounts.group(2);
-String mode = parsedMounts.group(3);
-if (!mode.equals("ro") && !mode.equals("rw")) {
-  throw new ContainerExecutionException(
-  

[21/50] hadoop git commit: YARN-8538. Fixed memory leaks in container-executor and test cases. Contributed by Billie Rinaldi

2018-07-26 Thread eyang
YARN-8538.  Fixed memory leaks in container-executor and test cases.
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d82edec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d82edec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d82edec3

Branch: refs/remotes/origin/branch-3.1
Commit: d82edec3c02db4eae3af91760aa31dfbbe7a9e03
Parents: 228508a
Author: Eric Yang 
Authored: Mon Jul 16 17:38:49 2018 -0400
Committer: Eric Yang 
Committed: Wed Jul 18 13:44:49 2018 -0400

--
 .../container-executor/impl/configuration.c |   3 +
 .../main/native/container-executor/impl/main.c  |   4 +-
 .../impl/modules/cgroups/cgroups-operations.c   |   2 +
 .../container-executor/impl/utils/docker-util.c | 234 ++-
 .../test/modules/cgroups/test-cgroups-module.cc |   8 +
 .../test/modules/fpga/test-fpga-module.cc   |  24 +-
 .../test/modules/gpu/test-gpu-module.cc |  24 +-
 .../test/test_configuration.cc  |  34 ++-
 .../native/container-executor/test/test_util.cc |   5 +
 .../test/utils/test-string-utils.cc |   6 +
 .../test/utils/test_docker_util.cc  | 128 ++
 11 files changed, 307 insertions(+), 165 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82edec3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
index f23cff0..baaa4dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c
@@ -58,6 +58,7 @@ void free_section(struct section *section) {
 section->name = NULL;
   }
   section->size = 0;
+  free(section);
 }
 
 //clean up method for freeing configuration
@@ -466,6 +467,7 @@ static void merge_sections(struct section *section1, struct 
section *section2, c
   section1->size += section2->size;
   if (free_second_section) {
 free(section2->name);
+free(section2->kv_pairs);
 memset(section2, 0, sizeof(*section2));
 free(section2);
   }
@@ -708,6 +710,7 @@ char *get_config_path(const char *argv0) {
 
   const char *orig_conf_file = HADOOP_CONF_DIR "/" CONF_FILENAME;
   char *conf_file = resolve_config_path(orig_conf_file, executable_file);
+  free(executable_file);
   if (conf_file == NULL) {
 fprintf(ERRORFILE, "Configuration file %s not found.\n", orig_conf_file);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82edec3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 3d7b19a..1ed3ce8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -128,11 +128,13 @@ static void flush_and_close_log_files() {
 LOGFILE = NULL;
   }
 
-if (ERRORFILE != NULL) {
+  if (ERRORFILE != NULL) {
 fflush(ERRORFILE);
 fclose(ERRORFILE);
 ERRORFILE = NULL;
   }
+
+  free_executor_configurations();
 }
 
 /** Validates the current container-executor setup. Causes program exit

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d82edec3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/cgroups/cgroups-operations.c
index 

[10/50] hadoop git commit: HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.

2018-07-26 Thread eyang
HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.

(cherry picked from commit 418cc7f3aeabedc57c94aa9d4c4248c1476ac90e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe256a98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe256a98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe256a98

Branch: refs/remotes/origin/branch-3.1
Commit: fe256a98ff7b8cfb2b7c5c43fe965720e371ce1a
Parents: f9fa3cb
Author: Akira Ajisaka 
Authored: Wed Jul 11 14:46:43 2018 -0400
Committer: Akira Ajisaka 
Committed: Wed Jul 11 14:47:21 2018 -0400

--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 .../hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md | 2 +-
 hadoop-project/src/site/markdown/index.md.vm | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe256a98/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 9ed69bf..391b71b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -420,7 +420,7 @@ Runs a HDFS dfsadmin client.
 
 Usage: `hdfs dfsrouter`
 
-Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more 
info.
+Runs the DFS router. See 
[Router](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Router) for more info.
 
 ### `dfsrouteradmin`
 
@@ -449,7 +449,7 @@ Usage:
 | `-nameservice` `disable` `enable` *nameservice* | Disable/enable  a name 
service from the federation. If disabled, requests will not go to that name 
service. |
 | `-getDisabledNameservices` | Get the name services that are disabled in the 
federation. |
 
-The commands for managing Router-based federation. See [Mount table 
management](./HDFSRouterFederation.html#Mount_table_management) for more info.
+The commands for managing Router-based federation. See [Mount table 
management](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Mount_table_management)
 for more info.
 
 ### `diskbalancer`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe256a98/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
index 01e7076..b8d5321 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsProvidedStorage.md
@@ -38,7 +38,7 @@ is limited to creating a *read-only image* of a remote 
namespace that implements
 to serve the image. Specifically, reads from a snapshot of a remote namespace 
are
 supported. Adding a remote namespace to an existing/running namenode, 
refreshing the
 remote snapshot, unmounting, and writes are not available in this release. One
-can use [ViewFs](./ViewFs.html) and [RBF](HDFSRouterFederation.html) to
+can use [ViewFs](./ViewFs.html) and 
[RBF](../hadoop-hdfs-rbf/HDFSRouterFederation.html) to
 integrate namespaces with `PROVIDED` storage into an existing deployment.
 
 Creating HDFS Clusters with `PROVIDED` Storage

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe256a98/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 8b9cfda..438145a 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -225,7 +225,7 @@ cluster for existing HDFS clients.
 
 See [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) and the
 HDFS Router-based Federation
-[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html) 
for
+[documentation](./hadoop-project-dist/hadoop-hdfs-rbf/HDFSRouterFederation.html)
 for
 more details.
 
 API-based configuration of Capacity Scheduler queue configuration


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] hadoop git commit: YARN-8360. Improve YARN service restart policy and node manager auto restart policy. Contributed by Suma Shivaprasad

2018-07-26 Thread eyang
YARN-8360. Improve YARN service restart policy and node manager auto restart 
policy.
   Contributed by Suma Shivaprasad

(cherry picked from commit 84d7bf1eeff6b9418361afa4aa713e5e6f771365)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e665c0a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e665c0a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e665c0a9

Branch: refs/remotes/origin/branch-3.1
Commit: e665c0a9dd29248ac0cc22ec5a0c830ada13df60
Parents: 4f2a129
Author: Eric Yang 
Authored: Mon Jul 23 12:57:01 2018 -0400
Committer: Eric Yang 
Committed: Mon Jul 23 12:59:37 2018 -0400

--
 .../service/component/AlwaysRestartPolicy.java  |  5 ++
 .../component/ComponentRestartPolicy.java   |  2 +
 .../service/component/NeverRestartPolicy.java   |  5 ++
 .../component/OnFailureRestartPolicy.java   |  5 ++
 .../provider/AbstractProviderService.java   | 29 +
 .../hadoop/yarn/service/ServiceTestUtils.java   |  2 +-
 .../containerlaunch/TestAbstractLauncher.java   | 66 
 7 files changed, 101 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e665c0a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
index 704ab14..505120d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/AlwaysRestartPolicy.java
@@ -79,4 +79,9 @@ public final class AlwaysRestartPolicy implements 
ComponentRestartPolicy {
   @Override public boolean shouldTerminate(Component component) {
 return false;
   }
+
+  @Override public boolean allowContainerRetriesForInstance(
+  ComponentInstance componentInstance) {
+return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e665c0a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
index 23b0fb9..c5adffe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentRestartPolicy.java
@@ -42,4 +42,6 @@ public interface ComponentRestartPolicy {
 
   boolean shouldTerminate(Component component);
 
+  boolean allowContainerRetriesForInstance(ComponentInstance 
componentInstance);
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e665c0a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/NeverRestartPolicy.java
index ace1f89..cd44a58 100644
--- 

[31/50] hadoop git commit: YARN-8528. Final states in ContainerAllocation might be modified externally causing unexpected allocation results. Contributed by Xintong Song.

2018-07-26 Thread eyang
YARN-8528. Final states in ContainerAllocation might be modified externally 
causing unexpected allocation results. Contributed by Xintong Song.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/004e1f24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/004e1f24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/004e1f24

Branch: refs/remotes/origin/branch-3.1
Commit: 004e1f248ef20b78f9d12d6f1fe04f66d8c56158
Parents: 823d576
Author: Weiwei Yang 
Authored: Fri Jul 20 22:32:11 2018 +0800
Committer: Weiwei Yang 
Committed: Fri Jul 20 22:43:47 2018 +0800

--
 .../capacity/allocator/ContainerAllocation.java |  2 +-
 .../allocator/RegularContainerAllocator.java| 10 ++--
 .../capacity/TestCapacityScheduler.java | 48 
 3 files changed, 54 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/004e1f24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
index f408508..b9b9bcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/ContainerAllocation.java
@@ -56,7 +56,7 @@ public class ContainerAllocation {
 
   RMContainer containerToBeUnreserved;
   private Resource resourceToBeAllocated = Resources.none();
-  AllocationState state;
+  private AllocationState state;
   NodeType containerNodeType = NodeType.NODE_LOCAL;
   NodeType requestLocalityType = null;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/004e1f24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index 99deb1a..adc27f5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -263,7 +263,7 @@ public class RegularContainerAllocator extends 
AbstractContainerAllocator {
 reservedContainer, schedulingMode, resourceLimits);
 
 if (null == reservedContainer) {
-  if (result.state == AllocationState.PRIORITY_SKIPPED) {
+  if (result.getAllocationState() == AllocationState.PRIORITY_SKIPPED) {
 // Don't count 'skipped nodes' as a scheduling opportunity!
 application.subtractSchedulingOpportunity(schedulerKey);
   }
@@ -487,8 +487,8 @@ public class RegularContainerAllocator extends 
AbstractContainerAllocator {
 
   // When a returned allocation is LOCALITY_SKIPPED, since we're in
   // off-switch request now, we will skip this app w.r.t priorities 
-  if (allocation.state == AllocationState.LOCALITY_SKIPPED) {
-allocation.state = AllocationState.APP_SKIPPED;
+  if (allocation.getAllocationState() == AllocationState.LOCALITY_SKIPPED) 
{
+allocation = ContainerAllocation.APP_SKIPPED;
   }
   allocation.requestLocalityType = requestLocalityType;
 
@@ -836,8 +836,8 @@ public class RegularContainerAllocator extends 
AbstractContainerAllocator {
   result = tryAllocateOnNode(clusterResource, node, 

[27/50] hadoop git commit: YARN-8501. Reduce complexity of RMWebServices getApps method. Contributed by Szilard Nemeth

2018-07-26 Thread eyang
YARN-8501. Reduce complexity of RMWebServices getApps method.
   Contributed by Szilard Nemeth

(cherry picked from commit 5836e0a46bf9793e0a61bb8ec46536f4a67d38d7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76b8beb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76b8beb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76b8beb2

Branch: refs/remotes/origin/branch-3.1
Commit: 76b8beb289c0d0b77653eae69de3b1469a417883
Parents: a147098
Author: Eric Yang 
Authored: Thu Jul 19 12:30:38 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 19 12:32:55 2018 -0400

--
 .../hadoop/yarn/server/webapp/WebServices.java  |   2 +-
 .../webapp/ApplicationsRequestBuilder.java  | 231 
 .../resourcemanager/webapp/RMWebServices.java   | 145 +
 .../webapp/TestApplicationsRequestBuilder.java  | 529 +++
 4 files changed, 777 insertions(+), 130 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76b8beb2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index df4656f..8b00b9e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -392,7 +392,7 @@ public class WebServices {
 response.setContentType(null);
   }
 
-  protected static Set<String>
+  public static Set<String>
   parseQueries(Set<String> queries, boolean isState) {
 Set<String> params = new HashSet<String>();
 if (!queries.isEmpty()) {
 if (!queries.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76b8beb2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java
new file mode 100644
index 000..876d044
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/ApplicationsRequestBuilder.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
+.CapacityScheduler;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.webapp.WebServices.parseQueries;
+
+public class ApplicationsRequestBuilder {
+
+  private Set<String> statesQuery = Sets.newHashSet();
+  private Set<String> users = Sets.newHashSetWithExpectedSize(1);
+  private Set<String> queues = Sets.newHashSetWithExpectedSize(1);
+  private String limit = null;
+  private Long limitNumber;
+
+  // 

[08/50] hadoop git commit: HDFS-13726. RBF: Fix RBF configuration links. Contributed by Takanobu Asanuma.

2018-07-26 Thread eyang
HDFS-13726. RBF: Fix RBF configuration links. Contributed by Takanobu Asanuma.

(cherry picked from commit 2ae13d41dcd4f49e6b4ebc099e5f8bb8280b9872)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e98bf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e98bf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e98bf1

Branch: refs/remotes/origin/branch-3.1
Commit: 99e98bf19a70711935823bc82861cd67c1e89c1c
Parents: caf3853
Author: Yiqun Lin 
Authored: Wed Jul 11 22:11:59 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Jul 11 22:15:07 2018 +0800

--
 .../hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e98bf1/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 70c6226..73e0f4a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -175,7 +175,7 @@ Deployment
 
 By default, the Router is ready to take requests and monitor the NameNode in 
the local machine.
 It needs to know the State Store endpoint by setting 
`dfs.federation.router.store.driver.class`.
-The rest of the options are documented in 
[hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
+The rest of the options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-rbf/hdfs-rbf-default.xml).
 
 Once the Router is configured, it can be started:
 
@@ -290,7 +290,7 @@ Router configuration
 
 
 One can add the configurations for Router-based federation to 
**hdfs-site.xml**.
-The main options are documented in 
[hdfs-default.xml](../hadoop-hdfs/hdfs-default.xml).
+The main options are documented in 
[hdfs-rbf-default.xml](../hadoop-hdfs-rbf/hdfs-rbf-default.xml).
 The configuration values are described in this section.
 
 ### RPC server


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/50] hadoop git commit: HDFS-12837. Intermittent failure in TestReencryptionWithKMS.

2018-07-26 Thread eyang
HDFS-12837. Intermittent failure in TestReencryptionWithKMS.

(cherry picked from commit b37074be5ab35c238e18bb9c3b89db6d7f8d0986)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5d98755
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5d98755
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5d98755

Branch: refs/remotes/origin/branch-3.1
Commit: d5d987550a2aa089761440f1e64be59eb446c2a7
Parents: fe256a9
Author: Xiao Chen 
Authored: Wed Jul 11 20:54:37 2018 -0700
Committer: Xiao Chen 
Committed: Wed Jul 11 21:03:45 2018 -0700

--
 .../server/namenode/ReencryptionHandler.java|  4 +-
 .../hdfs/server/namenode/TestReencryption.java  | 61 +++-
 2 files changed, 37 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d98755/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index 5b52c82..b92fe9f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -616,7 +616,9 @@ public class ReencryptionHandler implements Runnable {
   while (shouldPauseForTesting) {
 LOG.info("Sleeping in the re-encrypt handler for unit test.");
 synchronized (reencryptionHandler) {
-  reencryptionHandler.wait(3);
+  if (shouldPauseForTesting) {
+reencryptionHandler.wait(3);
+  }
 }
 LOG.info("Continuing re-encrypt handler after pausing.");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d98755/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
index 7685f31..647 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
@@ -67,6 +67,7 @@ import static 
org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -207,8 +208,7 @@ public class TestReencryption {
 ZoneReencryptionStatus zs = it.next();
 assertEquals(zone.toString(), zs.getZoneName());
 assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
-assertTrue(zs.getCompletionTime() > 0);
-assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+verifyZoneCompletionTime(zs);
 assertNotEquals(fei0.getEzKeyVersionName(), zs.getEzKeyVersionName());
 assertEquals(fei1.getEzKeyVersionName(), zs.getEzKeyVersionName());
 assertEquals(10, zs.getFilesReencrypted());
@@ -600,14 +600,27 @@ public class TestReencryption {
 final ZoneReencryptionStatus zs = it.next();
 assertEquals(zone.toString(), zs.getZoneName());
 assertEquals(ZoneReencryptionStatus.State.Completed, zs.getState());
-assertTrue(zs.getCompletionTime() > 0);
-assertTrue(zs.getCompletionTime() > zs.getSubmissionTime());
+verifyZoneCompletionTime(zs);
 if (fei != null) {
   assertNotEquals(fei.getEzKeyVersionName(), zs.getEzKeyVersionName());
 }
 assertEquals(expectedFiles, zs.getFilesReencrypted());
   }
 
+  /**
+   * Verify the zone status' completion time is larger than 0, and is no less
+   * than submission time.
+   */
+  private void verifyZoneCompletionTime(final ZoneReencryptionStatus zs) {
+assertNotNull(zs);
+assertTrue("Completion time should be positive. " + zs.getCompletionTime(),
+zs.getCompletionTime() > 0);
+assertTrue("Completion time " + zs.getCompletionTime()
++ " should be no less than submission time "
++ zs.getSubmissionTime(),
+zs.getCompletionTime() >= zs.getSubmissionTime());
+  }
+
   @Test
   

[41/50] hadoop git commit: HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by Kitti Nanasi.

2018-07-26 Thread eyang
HADOOP-15609. Retry KMS calls when SSLHandshakeException occurs. Contributed by 
Kitti Nanasi.

(cherry picked from commit 81d59506e539673edde12e19c0df5c2edd9d02ad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40c06b38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40c06b38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40c06b38

Branch: refs/remotes/origin/branch-3.1
Commit: 40c06b389a15eeef655f108b422993ef12a1fb5b
Parents: 00c476a
Author: Xiao Chen 
Authored: Tue Jul 24 21:45:14 2018 -0700
Committer: Xiao Chen 
Committed: Tue Jul 24 21:46:21 2018 -0700

--
 .../key/kms/LoadBalancingKMSClientProvider.java | 17 -
 .../kms/TestLoadBalancingKMSClientProvider.java | 79 
 2 files changed, 92 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c06b38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 42cd47d..9677b0d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.net.ConnectException;
 import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;
@@ -27,6 +28,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import javax.net.ssl.SSLHandshakeException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
@@ -115,7 +118,6 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 if (providers.length == 0) {
   throw new IOException("No providers configured !");
 }
-IOException ex = null;
 int numFailovers = 0;
 for (int i = 0;; i++, numFailovers++) {
   KMSClientProvider provider = providers[(currPos + i) % providers.length];
@@ -130,8 +132,15 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   } catch (IOException ioe) {
 LOG.warn("KMS provider at [{}] threw an IOException: ",
 provider.getKMSUrl(), ioe);
-ex = ioe;
-
+// SSLHandshakeException can occur here because of lost connection
+// with the KMS server, creating a ConnectException from it,
+// so that the FailoverOnNetworkExceptionRetry policy will retry
+if (ioe instanceof SSLHandshakeException) {
+  Exception cause = ioe;
+  ioe = new ConnectException("SSLHandshakeException: "
+  + cause.getMessage());
+  ioe.initCause(cause);
+}
 RetryAction action = null;
 try {
   action = retryPolicy.shouldRetry(ioe, 0, numFailovers, false);
@@ -153,7 +162,7 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
   CommonConfigurationKeysPublic.
   KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, providers.length),
   providers.length);
-  throw ex;
+  throw ioe;
 }
 if (((numFailovers + 1) % providers.length) == 0) {
   // Sleep only after we try all the providers for every cycle.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40c06b38/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
index bd68dca..4e7aed9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import static 

[13/50] hadoop git commit: YARN-8434. Update federation documentation of Nodemanager configurations. Contributed by Bibin A Chundatt.

2018-07-26 Thread eyang
YARN-8434. Update federation documentation of Nodemanager configurations. 
Contributed by Bibin A Chundatt.

(cherry picked from commit 4523cc5637bc3558aa5796150b358ca8471773bb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/677bbdcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/677bbdcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/677bbdcd

Branch: refs/remotes/origin/branch-3.1
Commit: 677bbdcdc439e4bc06cdb34b44fe1d06ab03b1fa
Parents: 7cbb959
Author: bibinchundatt 
Authored: Sun Jul 15 13:53:53 2018 +0530
Committer: bibinchundatt 
Committed: Sun Jul 15 13:57:14 2018 +0530

--
 .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md| 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/677bbdcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
index 953f826..aeb7677 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
@@ -267,7 +267,6 @@ These are extra configurations that should appear in the 
**conf/yarn-site.xml**
 |: |: |
 | `yarn.nodemanager.amrmproxy.enabled` | `true` | Whether or not the AMRMProxy 
is enabled. |
 | `yarn.nodemanager.amrmproxy.interceptor-class.pipeline` | 
`org.apache.hadoop.yarn.server.nodemanager.amrmproxy.FederationInterceptor` | A 
comma-separated list of interceptors to be run at the amrmproxy. For federation 
the last step in the pipeline should be the FederationInterceptor. |
-| `yarn.client.failover-proxy-provider` | 
`org.apache.hadoop.yarn.server.federation.failover.FederationRMFailoverProxyProvider`
 | The class used to connect to the RMs by looking up the membership 
information in federation state-store. This must be set if federation is 
enabled, even if RM HA is not enabled.|
 
 Optional:
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/50] hadoop git commit: YARN-8506. Make GetApplicationsRequestPBImpl thread safe. (wangda)

2018-07-26 Thread eyang
YARN-8506. Make GetApplicationsRequestPBImpl thread safe. (wangda)

Change-Id: I2c006965375823c83036e7f45f7163d13c0bdf90


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ad82ea5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ad82ea5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ad82ea5

Branch: refs/remotes/origin/branch-3.1
Commit: 8ad82ea5b5b0c87e5771dcaa5530d4fbe03c92ae
Parents: 242b5ac
Author: Wangda Tan 
Authored: Mon Jul 9 11:35:15 2018 -0700
Committer: Wangda Tan 
Committed: Mon Jul 9 11:35:15 2018 -0700

--
 .../impl/pb/GetApplicationsRequestPBImpl.java   | 44 ++--
 1 file changed, 22 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ad82ea5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
index ad009d6..bc6be80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
@@ -65,7 +65,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
 viaProto = true;
   }
 
-  public GetApplicationsRequestProto getProto() {
+  public synchronized GetApplicationsRequestProto getProto() {
 mergeLocalToProto();
 proto = viaProto ? proto : builder.build();
 viaProto = true;
@@ -175,13 +175,13 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public Set getApplicationTypes() {
+  public synchronized Set getApplicationTypes() {
 initApplicationTypes();
 return this.applicationTypes;
   }
 
   @Override
-  public void setApplicationTypes(Set applicationTypes) {
+  public synchronized void setApplicationTypes(Set applicationTypes) {
 maybeInitBuilder();
 if (applicationTypes == null)
   builder.clearApplicationTypes();
@@ -198,13 +198,13 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public Set getApplicationTags() {
+  public synchronized Set getApplicationTags() {
 initApplicationTags();
 return this.applicationTags;
   }
 
   @Override
-  public void setApplicationTags(Set tags) {
+  public synchronized void setApplicationTags(Set tags) {
 maybeInitBuilder();
 if (tags == null || tags.isEmpty()) {
   builder.clearApplicationTags();
@@ -219,7 +219,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public EnumSet getApplicationStates() {
+  public synchronized EnumSet getApplicationStates() {
 initApplicationStates();
 return this.applicationStates;
   }
@@ -233,12 +233,12 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public ApplicationsRequestScope getScope() {
+  public synchronized ApplicationsRequestScope getScope() {
 initScope();
 return this.scope;
   }
 
-  public void setScope(ApplicationsRequestScope scope) {
+  public synchronized void setScope(ApplicationsRequestScope scope) {
 maybeInitBuilder();
 if (scope == null) {
   builder.clearScope();
@@ -247,7 +247,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public void setApplicationStates(EnumSet 
applicationStates) {
+  public synchronized void setApplicationStates(EnumSet 
applicationStates) {
 maybeInitBuilder();
 if (applicationStates == null) {
   builder.clearApplicationStates();
@@ -256,7 +256,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public void setApplicationStates(Set applicationStates) {
+  public synchronized void setApplicationStates(Set applicationStates) 
{
 EnumSet appStates = null;
 for (YarnApplicationState state : YarnApplicationState.values()) {
   if (applicationStates.contains(
@@ -272,12 +272,12 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   }
 
   @Override
-  public Set getUsers() {
+  public synchronized Set getUsers() {
 initUsers();
 return this.users;
   }
 
-  public void setUsers(Set users) {
+  public 

[35/50] hadoop git commit: HDFS-13583. RBF: Router admin clrQuota is not synchronized with nameservice. Contributed by Dibyendu Karmakar.

2018-07-26 Thread eyang
HDFS-13583. RBF: Router admin clrQuota is not synchronized with nameservice. 
Contributed by Dibyendu Karmakar.

(cherry picked from commit 17a87977f29ced49724f561a68565217c8cb4e94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5aca0588
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5aca0588
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5aca0588

Branch: refs/remotes/origin/branch-3.1
Commit: 5aca0588ea4ebef8dd4aca40e5dd414d0db23b61
Parents: 23b8546
Author: Yiqun Lin 
Authored: Tue Jul 24 11:15:47 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Jul 24 11:19:21 2018 +0800

--
 .../hdfs/server/federation/router/Quota.java|  9 ++-
 .../federation/router/RouterAdminServer.java|  8 --
 .../federation/router/RouterQuotaManager.java   |  4 +--
 .../router/RouterQuotaUpdateService.java|  2 +-
 .../federation/router/RouterQuotaUsage.java |  4 +--
 .../federation/store/records/MountTable.java|  4 +--
 .../store/records/impl/pb/MountTablePBImpl.java |  4 +--
 .../hdfs/tools/federation/RouterAdmin.java  |  8 +++---
 .../federation/router/TestRouterAdmin.java  |  8 ++
 .../federation/router/TestRouterAdminCLI.java   | 16 +---
 .../federation/router/TestRouterQuota.java  | 26 +---
 .../router/TestRouterQuotaManager.java  | 20 +++
 .../store/records/TestMountTable.java   |  4 +--
 13 files changed, 82 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aca0588/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index 75d3e04..846ccd1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -162,6 +162,8 @@ public class Quota {
   private QuotaUsage aggregateQuota(Map results) {
 long nsCount = 0;
 long ssCount = 0;
+long nsQuota = HdfsConstants.QUOTA_RESET;
+long ssQuota = HdfsConstants.QUOTA_RESET;
 boolean hasQuotaUnSet = false;
 
 for (Map.Entry entry : results.entrySet()) {
@@ -173,6 +175,8 @@ public class Quota {
 if (usage.getQuota() == -1 && usage.getSpaceQuota() == -1) {
   hasQuotaUnSet = true;
 }
+nsQuota = usage.getQuota();
+ssQuota = usage.getSpaceQuota();
 
 nsCount += usage.getFileAndDirectoryCount();
 ssCount += usage.getSpaceConsumed();
@@ -187,7 +191,10 @@ public class Quota {
 QuotaUsage.Builder builder = new QuotaUsage.Builder()
 .fileAndDirectoryCount(nsCount).spaceConsumed(ssCount);
 if (hasQuotaUnSet) {
-  builder.quota(HdfsConstants.QUOTA_DONT_SET);
+  builder.quota(HdfsConstants.QUOTA_RESET)
+  .spaceQuota(HdfsConstants.QUOTA_RESET);
+} else {
+  builder.quota(nsQuota).spaceQuota(ssQuota);
 }
 
 return builder.build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aca0588/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 8e23eca..114f008 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -28,6 +28,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import 
org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
 import 
org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
@@ -253,8 +254,11 @@ public class RouterAdminServer extends AbstractService
 
 if (nsQuota != HdfsConstants.QUOTA_DONT_SET
 || ssQuota != 

[39/50] hadoop git commit: YARN-6966. NodeManager metrics may return wrong negative values when NM restart. (Szilard Nemeth via Haibo Chen)

2018-07-26 Thread eyang
YARN-6966. NodeManager metrics may return wrong negative values when NM 
restart. (Szilard Nemeth via Haibo Chen)

(cherry picked from commit 9d3c39e9dd88b8f32223c01328581bb68507d415)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e7792dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e7792dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e7792dd

Branch: refs/remotes/origin/branch-3.1
Commit: 7e7792dd7b4d97a10af1dd583fc65214b4b9c009
Parents: 4488fd8
Author: Haibo Chen 
Authored: Mon Jul 23 11:06:44 2018 -0700
Committer: Haibo Chen 
Committed: Tue Jul 24 12:50:43 2018 -0700

--
 .../containermanager/ContainerManagerImpl.java  |  2 +-
 .../scheduler/ContainerScheduler.java   | 16 --
 .../recovery/NMLeveldbStateStoreService.java| 32 ++-
 .../recovery/NMNullStateStoreService.java   |  2 +-
 .../recovery/NMStateStoreService.java   |  3 +-
 .../BaseContainerManagerTest.java   |  2 +-
 .../TestContainerManagerRecovery.java   | 57 
 .../TestContainerSchedulerRecovery.java | 46 +++-
 .../metrics/TestNodeManagerMetrics.java |  4 +-
 .../recovery/NMMemoryStateStoreService.java | 16 +-
 .../TestNMLeveldbStateStoreService.java | 21 +++-
 11 files changed, 163 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e7792dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 26d06aa..ce240bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -496,7 +496,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
 Container container = new ContainerImpl(getConfig(), dispatcher,
 launchContext, credentials, metrics, token, context, rcs);
 context.getContainers().put(token.getContainerID(), container);
-containerScheduler.recoverActiveContainer(container, rcs.getStatus());
+containerScheduler.recoverActiveContainer(container, rcs);
 app.handle(new ApplicationContainerInitEvent(container));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e7792dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 57368ab..e6b0f46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -41,6 +41,9 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai
 
 
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService
+.RecoveredContainerState;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerStatus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -229,11 +232,11 @@ public class ContainerScheduler extends AbstractService 
implements
* @param rcs Recovered Container status
*/
   public 

[48/50] hadoop git commit: YARN-8330. Improved publishing ALLOCATED events to ATS. Contributed by Suma Shivaprasad

2018-07-26 Thread eyang
YARN-8330.  Improved publishing ALLOCATED events to ATS.
Contributed by Suma Shivaprasad

(cherry picked from commit f93ecf5c1e0b3db27424963814fc01ec43eb76e0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e3807af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e3807af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e3807af

Branch: refs/remotes/origin/branch-3.1
Commit: 8e3807afe007150f3f0ea0ac3a6c48913e31965b
Parents: 964f345
Author: Eric Yang 
Authored: Wed Jul 25 18:49:30 2018 -0400
Committer: Eric Yang 
Committed: Wed Jul 25 18:51:42 2018 -0400

--
 .../rmcontainer/RMContainerImpl.java| 64 +++-
 .../rmcontainer/TestRMContainerImpl.java| 11 +++-
 2 files changed, 43 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e3807af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index efac666..945e7cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
@@ -244,23 +244,13 @@ public class RMContainerImpl implements RMContainer {
 this.readLock = lock.readLock();
 this.writeLock = lock.writeLock();
 
-saveNonAMContainerMetaInfo = rmContext.getYarnConfiguration().getBoolean(
-   YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
-   YarnConfiguration
- .DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO);
+saveNonAMContainerMetaInfo =
+shouldPublishNonAMContainerEventstoATS(rmContext);
 
 if (container.getId() != null) {
   rmContext.getRMApplicationHistoryWriter().containerStarted(this);
 }
 
-// If saveNonAMContainerMetaInfo is true, store system metrics for all
-// containers. If false, and if this container is marked as the AM, metrics
-// will still be published for this container, but that calculation happens
-// later.
-if (saveNonAMContainerMetaInfo && null != container.getId()) {
-  rmContext.getSystemMetricsPublisher().containerCreated(
-  this, this.creationTime);
-}
 if (this.container != null) {
   this.allocationTags = this.container.getAllocationTags();
 }
@@ -590,8 +580,12 @@ public class RMContainerImpl implements RMContainer {
   container.getNodeId(), container.getContainerId(),
   container.getAllocationTags());
 
-  container.eventHandler.handle(new RMAppAttemptEvent(
-  container.appAttemptId, RMAppAttemptEventType.CONTAINER_ALLOCATED));
+  container.eventHandler.handle(
+  new RMAppAttemptEvent(container.appAttemptId,
+  RMAppAttemptEventType.CONTAINER_ALLOCATED));
+
+  publishNonAMContainerEventstoATS(container);
+
 }
   }
 
@@ -610,9 +604,11 @@ public class RMContainerImpl implements RMContainer {
   // Tell the app
   container.eventHandler.handle(new RMAppRunningOnNodeEvent(container
   .getApplicationAttemptId().getApplicationId(), container.nodeId));
+
+  publishNonAMContainerEventstoATS(container);
 }
   }
-  
+
   private static final class ContainerAcquiredWhileRunningTransition extends
   BaseTransition {
 
@@ -718,17 +714,12 @@ public class RMContainerImpl implements RMContainer {
 container);
 
   boolean saveNonAMContainerMetaInfo =
-  container.rmContext.getYarnConfiguration().getBoolean(
-  YarnConfiguration
-.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO,
-  YarnConfiguration
-.DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO);
+  shouldPublishNonAMContainerEventstoATS(container.rmContext);
 
   if (saveNonAMContainerMetaInfo || container.isAMContainer()) {
 container.rmContext.getSystemMetricsPublisher().containerFinished(
 container, container.finishTime);
   }
-
 }
 
 private static void 

[19/50] hadoop git commit: Fix potential FSImage corruption. Contributed by Ekanth Sethuramalingam & Arpit Agarwal.

2018-07-26 Thread eyang
Fix potential FSImage corruption. Contributed by Ekanth Sethuramalingam & Arpit 
Agarwal.

(cherry picked from commit 0a1e922f3d8eca4e852be57124ec1bcafaadb289)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53c7d82d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53c7d82d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53c7d82d

Branch: refs/remotes/origin/branch-3.1
Commit: 53c7d82d539f1a4afcb37ebeaaa0a1a7c25fe942
Parents: 34f1dd0
Author: Konstantin V Shvachko 
Authored: Mon Jul 16 18:20:24 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Jul 16 18:29:43 2018 -0700

--
 .../server/namenode/AclEntryStatusFormat.java   |  6 +-
 .../namenode/INodeWithAdditionalFields.java |  4 +-
 .../hdfs/server/namenode/XAttrFormat.java   | 67 +---
 3 files changed, 49 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c7d82d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 82aa214..2c5b23b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
@@ -38,7 +38,8 @@ import com.google.common.collect.ImmutableList;
  * [1:3) -- the type of the entry (AclEntryType) 
  * [3:6) -- the permission of the entry (FsAction) 
  * [6:7) -- A flag to indicate whether Named entry or not 
- * [7:32) -- the name of the entry, which is an ID that points to a 
+ * [7:8) -- Reserved 
+ * [8:32) -- the name of the entry, which is an ID that points to a 
  * string in the StringTableSection. 
  */
 public enum AclEntryStatusFormat {
@@ -47,7 +48,8 @@ public enum AclEntryStatusFormat {
   TYPE(SCOPE.BITS, 2),
   PERMISSION(TYPE.BITS, 3),
   NAMED_ENTRY_CHECK(PERMISSION.BITS, 1),
-  NAME(NAMED_ENTRY_CHECK.BITS, 25);
+  RESERVED(NAMED_ENTRY_CHECK.BITS, 1),
+  NAME(RESERVED.BITS, 24);
 
   private final LongBitFormat BITS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c7d82d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index 9adcc3e..84d99e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -35,8 +35,8 @@ public abstract class INodeWithAdditionalFields extends INode
 implements LinkedElement {
   enum PermissionStatusFormat {
 MODE(null, 16),
-GROUP(MODE.BITS, 25),
-USER(GROUP.BITS, 23);
+GROUP(MODE.BITS, 24),
+USER(GROUP.BITS, 24);
 
 final LongBitFormat BITS;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53c7d82d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
index 7e704d0..f9f06db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
@@ -27,25 +27,56 @@ import org.apache.hadoop.hdfs.XAttrHelper;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Ints;
+import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 /**
  * Class to pack XAttrs into byte[].
  * For each XAttr:
  *   The first 4 bytes represents XAttr namespace and name
  * [0:3)  - XAttr namespace
- * [3:32) - The name of the entry, which is an ID that points to a
+ * [3:8) - Reserved
+ * [8:32) - The name of the entry, which is an ID that points to a
  *  string in map
  *   The following two bytes represents the length of XAttr value
  * 

[26/50] hadoop git commit: HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled reliably fails. Contributed by Weiwei Yang.

2018-07-26 Thread eyang
HADOOP-15614. TestGroupsCaching.testExceptionOnBackgroundRefreshHandled 
reliably fails. Contributed by Weiwei Yang.

(cherry picked from commit ccf2db7fc2688d262df3309007cb12a4dfedc179)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a147098c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a147098c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a147098c

Branch: refs/remotes/origin/branch-3.1
Commit: a147098c4fab435f1c8962e1fa5b22bf6a3b84f0
Parents: a607c02
Author: Kihwal Lee 
Authored: Thu Jul 19 11:19:19 2018 -0500
Committer: Kihwal Lee 
Committed: Thu Jul 19 11:19:19 2018 -0500

--
 .../apache/hadoop/security/TestGroupsCaching.java  | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a147098c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 46e36b3..bba8152 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -561,23 +561,28 @@ public class TestGroupsCaching {
 
 // Then expire that entry
 timer.advance(4 * 1000);
+// Pause the getGroups operation and this will delay the cache refresh
+FakeGroupMapping.pause();
 
 // Now get the cache entry - it should return immediately
 // with the old value and the cache will not have completed
 // a request to getGroups yet.
 assertEquals(groups.getGroups("me").size(), 2);
 assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
+// Resume the getGroups operation and the cache can get refreshed
+FakeGroupMapping.resume();
 
-// Now sleep for a short time and re-check the request count. It should 
have
-// increased, but the exception means the cache will not have updated
-Thread.sleep(50);
+// Now wait for the refresh done, because of the exception, we expect
+// a onFailure callback gets called and the counter for failure is 1
+waitForGroupCounters(groups, 0, 0, 0, 1);
 FakeGroupMapping.setThrowException(false);
 assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 2);
 
-// Now sleep another short time - the 3rd call to getGroups above
-// will have kicked off another refresh that updates the cache
-Thread.sleep(50);
+// Now the 3rd call to getGroups above will have kicked off
+// another refresh that updates the cache, since it no longer gives
+// exception, we now expect the counter for success is 1.
+waitForGroupCounters(groups, 0, 0, 1, 1);
 assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
 assertEquals(groups.getGroups("me").size(), 3);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] hadoop git commit: YARN-8511. When AM releases a container, RM removes allocation tags before it is released by NM. (Weiwei Yang via wangda)

2018-07-26 Thread eyang
YARN-8511. When AM releases a container, RM removes allocation tags before it 
is released by NM. (Weiwei Yang via wangda)

Change-Id: I6f9f409f2ef685b405cbff547dea9623bf3322d9
(cherry picked from commit 752dcce5f4cf0f6ebcb40a61f622f1a885c4bda7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44beab0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44beab0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44beab0b

Branch: refs/remotes/origin/branch-3.1
Commit: 44beab0b63cb853f2f711d9592fcd947241d112e
Parents: ac9155d
Author: Wangda Tan 
Authored: Mon Jul 16 10:54:41 2018 -0700
Committer: Wangda Tan 
Committed: Mon Jul 16 11:04:08 2018 -0700

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   6 ++
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   6 ++
 .../rmcontainer/RMContainerImpl.java|   5 -
 .../server/resourcemanager/rmnode/RMNode.java   |   6 ++
 .../resourcemanager/rmnode/RMNodeImpl.java  |   5 +
 .../scheduler/SchedulerNode.java|  15 +++
 .../yarn/server/resourcemanager/MockNodes.java  |   5 +
 .../rmcontainer/TestRMContainerImpl.java|  16 ++-
 .../scheduler/TestAbstractYarnScheduler.java| 104 +++
 9 files changed, 162 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44beab0b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 0c99139..69946c8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode
@@ -219,6 +220,11 @@ public class NodeInfo {
 }
 
 @Override
+public RMContext getRMContext() {
+  return null;
+}
+
+@Override
 public Resource getPhysicalResource() {
   return null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44beab0b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 78645e9..a96b790 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode
@@ -207,6 +208,11 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
+  public RMContext getRMContext() {
+return node.getRMContext();
+  }
+
+  @Override
   public Resource getPhysicalResource() {
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44beab0b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 

[25/50] hadoop git commit: HADOOP-15610. Fixed pylint version for Hadoop docker image. Contributed by Jack Bearden

2018-07-26 Thread eyang
HADOOP-15610.  Fixed pylint version for Hadoop docker image.
   Contributed by Jack Bearden

(cherry picked from commit ba1ab08fdae96ad7c9c4f4bf8672abd741b7f758)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a607c02f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a607c02f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a607c02f

Branch: refs/remotes/origin/branch-3.1
Commit: a607c02f15f6346d06dd2830737037f8556c7d89
Parents: dfa7142
Author: Eric Yang 
Authored: Wed Jul 18 20:09:43 2018 -0400
Committer: Eric Yang 
Committed: Wed Jul 18 20:11:39 2018 -0400

--
 dev-support/docker/Dockerfile | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a607c02f/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 6d05fe1..e1aa00f 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -151,9 +151,10 @@ RUN apt-get -q update && apt-get -q install -y shellcheck
 RUN apt-get -q update && apt-get -q install -y bats
 
 
-# Install pylint (always want latest)
+# Install pylint at fixed version (2.0.0 removed python2 support)
+# https://github.com/PyCQA/pylint/issues/2294
 
-RUN pip2 install pylint
+RUN pip2 install pylint==1.9.2
 
 
 # Install dateutil.parser


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[49/50] hadoop git commit: YARN-8545. Return allocated resource to RM for failed container. Contributed by Chandni Singh

2018-07-26 Thread eyang
YARN-8545.  Return allocated resource to RM for failed container.
Contributed by Chandni Singh

(cherry picked from commit 40fad32824d2f8f960c779d78357e62103453da0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/177f6045
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/177f6045
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/177f6045

Branch: refs/remotes/origin/branch-3.1
Commit: 177f6045ac4ae6e2dbae2e04da8c9cebb5da8748
Parents: 8e3807a
Author: Eric Yang 
Authored: Thu Jul 26 18:22:57 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:25:41 2018 -0400

--
 .../hadoop/yarn/service/ServiceScheduler.java   |  3 +-
 .../yarn/service/component/Component.java   | 42 +++-
 .../component/instance/ComponentInstance.java   | 21 +++---
 .../instance/ComponentInstanceEvent.java|  2 +
 .../containerlaunch/ContainerLaunchService.java | 12 --
 .../hadoop/yarn/service/MockServiceAM.java  | 34 +++-
 .../hadoop/yarn/service/TestServiceAM.java  | 35 
 .../yarn/service/component/TestComponent.java   |  3 +-
 .../instance/TestComponentInstance.java | 26 ++--
 9 files changed, 135 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/177f6045/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index d3e8e4f..cfaf356 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -687,7 +687,8 @@ public class ServiceScheduler extends CompositeService {
 }
 ComponentEvent event =
 new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED)
-.setStatus(status).setInstance(instance);
+.setStatus(status).setInstance(instance)
+.setContainerId(containerId);
 dispatcher.getEventHandler().handle(event);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/177f6045/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a1ee796..aaa23da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service.component;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -518,10 +519,10 @@ public class Component implements
EventHandler<ComponentEvent> {
   private static class ContainerCompletedTransition extends BaseTransition {
 @Override
 public void transition(Component component, ComponentEvent event) {
-
+  Preconditions.checkNotNull(event.getContainerId());
   component.updateMetrics(event.getStatus());
   component.dispatcher.getEventHandler().handle(
-  new ComponentInstanceEvent(event.getStatus().getContainerId(), STOP)
+  new ComponentInstanceEvent(event.getContainerId(), STOP)
   .setStatus(event.getStatus()));
 
   ComponentRestartPolicy restartPolicy =
@@ -784,28 

[12/50] hadoop git commit: YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. Contributed by Jim Brennan

2018-07-26 Thread eyang
YARN-8515. container-executor can crash with SIGPIPE after nodemanager restart. 
Contributed by Jim Brennan

(cherry picked from commit 17118f446c2387aa796849da8b69a845d9d307d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cbb9597
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cbb9597
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cbb9597

Branch: refs/remotes/origin/branch-3.1
Commit: 7cbb9597c43d0e4270a64d28b5521941ce940a1a
Parents: d5d9875
Author: Jason Lowe 
Authored: Fri Jul 13 10:05:25 2018 -0500
Committer: Jason Lowe 
Committed: Fri Jul 13 10:06:38 2018 -0500

--
 .../src/main/native/container-executor/impl/main.c | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cbb9597/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index c54fd3e..3d7b19a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -31,6 +31,7 @@
 #include 
 #include 
 #include 
+#include 
 
 static void display_usage(FILE *stream) {
   fprintf(stream,
@@ -112,6 +113,11 @@ static void open_log_files() {
   if (ERRORFILE == NULL) {
 ERRORFILE = stderr;
   }
+
+  // There may be a process reading from stdout/stderr, and if it
+  // exits, we will crash on a SIGPIPE when we try to write to them.
+  // By ignoring SIGPIPE, we can handle the EPIPE instead of crashing.
+  signal(SIGPIPE, SIG_IGN);
 }
 
 /* Flushes and closes log files */


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/50] hadoop git commit: YARN-8473. Containers being launched as app tears down can leave containers in NEW state. Contributed by Jason Lowe.

2018-07-26 Thread eyang
YARN-8473. Containers being launched as app tears down can leave containers in 
NEW state. Contributed by Jason Lowe.

(cherry picked from commit 705e2c1f7cba51496b0d019ecedffbe5fb55c28b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f10491e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f10491e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f10491e

Branch: refs/remotes/origin/branch-3.1
Commit: 6f10491e646ca69d05b7c676ba92e17e9a64c3b7
Parents: d54241e
Author: Sunil G 
Authored: Tue Jul 10 20:11:47 2018 +0530
Committer: Sunil G 
Committed: Tue Jul 10 20:12:47 2018 +0530

--
 .../application/ApplicationImpl.java| 36 ++---
 .../application/TestApplication.java| 53 
 2 files changed, 71 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f10491e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index 39be7a7..6d84fb2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -211,6 +211,9 @@ public class ApplicationImpl implements Application {
   private static final ContainerDoneTransition CONTAINER_DONE_TRANSITION =
   new ContainerDoneTransition();
 
+  private static final InitContainerTransition INIT_CONTAINER_TRANSITION =
+  new InitContainerTransition();
+
  private static StateMachineFactory<ApplicationImpl, ApplicationState,
      ApplicationEventType, ApplicationEvent> stateMachineFactory =
  new StateMachineFactory<ApplicationImpl, ApplicationState,
      ApplicationEventType, ApplicationEvent>(ApplicationState.NEW)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f10491e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
index c8f28e2..cbe19ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java
@@ -360,35 +360,66 @@ public class TestApplication {
 }
   }
 
-//TODO Re-work after Application transitions are changed.
-//  @Test
+  @Test
   @SuppressWarnings("unchecked")
-  public void testStartContainerAfterAppFinished() {
+  public void testStartContainerAfterAppRunning() {
 WrappedApplication wa = null;
 try {
-  wa = new WrappedApplication(5, 314159265358979L, "yak", 3);
+  wa = new WrappedApplication(5, 314159265358979L, "yak", 4);
   wa.initApplication();
-  wa.initContainer(-1);
+  wa.initContainer(0);
   assertEquals(ApplicationState.INITING, wa.app.getApplicationState());
   wa.applicationInited();
   assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState());
 
-  reset(wa.localizerBus);
-  wa.containerFinished(0);
-  wa.containerFinished(1);
-  wa.containerFinished(2);
   assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState());
-  assertEquals(0, wa.app.getContainers().size());
+  assertEquals(1, wa.app.getContainers().size());
 
   wa.appFinished();
+  verify(wa.containerBus).handle(
+  argThat(new ContainerKillMatcher(wa.containers.get(0)
+  .getContainerId(;
+  assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,
+  

[14/50] hadoop git commit: YARN-8421: when moving app, activeUsers is increased, even though app does not have outstanding request. Contributed by Kyungwan Nam

2018-07-26 Thread eyang
YARN-8421: when moving app, activeUsers is increased, even though app does not 
have outstanding request. Contributed by Kyungwan Nam

(cherry picked from commit 937ef39b3ff90f72392b7a319e4346344db34e03)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a79e893
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a79e893
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a79e893

Branch: refs/remotes/origin/branch-3.1
Commit: 9a79e893f74ab97571d156b4f39a3b751aad240f
Parents: 677bbdc
Author: Eric E Payne 
Authored: Mon Jul 16 16:24:21 2018 +
Committer: Eric E Payne 
Committed: Mon Jul 16 16:32:05 2018 +

--
 .../scheduler/AppSchedulingInfo.java|  4 +-
 .../TestSchedulerApplicationAttempt.java| 44 
 2 files changed, 47 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a79e893/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 1efdd8b..8074f06 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -578,7 +578,9 @@ public class AppSchedulingInfo {
   newMetrics.moveAppTo(this);
   abstractUsersManager.deactivateApplication(user, applicationId);
   abstractUsersManager = newQueue.getAbstractUsersManager();
-  abstractUsersManager.activateApplication(user, applicationId);
+  if (!schedulerKeys.isEmpty()) {
+abstractUsersManager.activateApplication(user, applicationId);
+  }
   this.queue = newQueue;
 } finally {
   this.writeLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a79e893/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index 17f9d23..c110b1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -58,6 +58,50 @@ public class TestSchedulerApplicationAttempt {
 QueueMetrics.clearQueueMetrics();
 DefaultMetricsSystem.shutdown();
   }
+
+  @Test
+  public void testActiveUsersWhenMove() {
+final String user = "user1";
+Queue parentQueue = createQueue("parent", null);
+Queue queue1 = createQueue("queue1", parentQueue);
+Queue queue2 = createQueue("queue2", parentQueue);
+Queue queue3 = createQueue("queue3", parentQueue);
+
+ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
+RMContext rmContext = mock(RMContext.class);
+when(rmContext.getEpoch()).thenReturn(3L);
+SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
+user, queue1, queue1.getAbstractUsersManager(), rmContext);
+
+// Resource request
+Resource requestedResource = Resource.newInstance(1536, 2);
+Priority requestedPriority = Priority.newInstance(2);
+ResourceRequest request = ResourceRequest.newInstance(requestedPriority,
+ResourceRequest.ANY, requestedResource, 1);
+app.updateResourceRequests(Arrays.asList(request));
+
+assertEquals(1, queue1.getAbstractUsersManager().getNumActiveUsers());
+// move app from queue1 to 

[20/50] hadoop git commit: HDFS-13733. RBF: Add Web UI configurations and descriptions to RBF document. Contributed by Takanobu Asanuma.

2018-07-26 Thread eyang
HDFS-13733. RBF: Add Web UI configurations and descriptions to RBF document. 
Contributed by Takanobu Asanuma.

(cherry picked from commit 1af87df242c4286474961078d306a5692f85debc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/228508a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/228508a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/228508a0

Branch: refs/remotes/origin/branch-3.1
Commit: 228508a0eef7015e5e03dec0be71afd12c311d6d
Parents: 53c7d82
Author: Yiqun Lin 
Authored: Tue Jul 17 10:45:08 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Jul 17 10:47:07 2018 +0800

--
 .../src/site/markdown/HDFSRouterFederation.md   | 12 
 1 file changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/228508a0/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
index 73e0f4a..c5bf5e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/site/markdown/HDFSRouterFederation.md
@@ -330,6 +330,18 @@ The administration server to manage the Mount Table.
 | dfs.federation.router.admin-bind-host | 0.0.0.0 | The actual address the RPC 
admin server will bind to. |
 | dfs.federation.router.admin.handler.count | 1 | The number of server threads 
for the router to handle RPC requests from admin. |
 
+### HTTP Server
+
The HTTP Server to provide Web UI and the HDFS REST interface
([WebHDFS](../hadoop-hdfs/WebHDFS.html)) for the clients. The default URL is
"`http://router_host:50071`".
+
+| Property | Default | Description|
+|: |: |: |
+| dfs.federation.router.http.enable | `true` | If `true`, the HTTP service to 
handle client requests in the router is enabled. |
+| dfs.federation.router.http-address | 0.0.0.0:50071 | HTTP address that 
handles the web requests to the Router. |
+| dfs.federation.router.http-bind-host | 0.0.0.0 | The actual address the HTTP 
server will bind to. |
+| dfs.federation.router.https-address | 0.0.0.0:50072 | HTTPS address that 
handles the web requests to the Router. |
+| dfs.federation.router.https-bind-host | 0.0.0.0 | The actual address the 
HTTPS server will bind to. |
+
 ### State Store
 
 The connection to the State Store and the internal caching at the Router.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] hadoop git commit: YARN-4606. CapacityScheduler: applications could get starved because computation of #activeUsers considers pending apps. Contributed by Manikandan R

2018-07-26 Thread eyang
YARN-4606. CapacityScheduler: applications could get starved because 
computation of #activeUsers considers pending apps. Contributed by Manikandan R

(cherry picked from commit 9485c9aee6e9bb935c3e6ae4da81d70b621781de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/830ef12a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/830ef12a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/830ef12a

Branch: refs/remotes/origin/branch-3.1
Commit: 830ef12af830de5d54e51d2b1d16c56f5eb78e43
Parents: d2212c2
Author: Eric E Payne 
Authored: Wed Jul 25 16:22:04 2018 +
Committer: Eric E Payne 
Committed: Wed Jul 25 16:30:30 2018 +

--
 .../scheduler/capacity/UsersManager.java|  27 +++-
 .../capacity/TestCapacityScheduler.java | 128 +++
 .../capacity/TestContainerAllocation.java   |  43 +++
 3 files changed, 197 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/830ef12a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
index 747a488..83ee6c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -85,6 +85,7 @@ public class UsersManager implements AbstractUsersManager {
 
   private final QueueMetrics metrics;
   private AtomicInteger activeUsers = new AtomicInteger(0);
+  private AtomicInteger activeUsersWithOnlyPendingApps = new AtomicInteger(0);
  private Map<String, Set<ApplicationId>> usersApplications =
      new HashMap<String, Set<ApplicationId>>();
 
@@ -671,9 +672,23 @@ public class UsersManager implements AbstractUsersManager {
 // update in local storage
 userLimitPerSchedulingMode.put(schedulingMode, computedUserLimit);
 
+computeNumActiveUsersWithOnlyPendingApps();
+
 return userLimitPerSchedulingMode;
   }
 
+  // This method is called within the lock.
+  private void computeNumActiveUsersWithOnlyPendingApps() {
+int numPendingUsers = 0;
+for (User user : users.values()) {
+  if ((user.getPendingApplications() > 0)
+  && (user.getActiveApplications() <= 0)) {
+numPendingUsers++;
+  }
+}
+activeUsersWithOnlyPendingApps = new AtomicInteger(numPendingUsers);
+  }
+
   private Resource computeUserLimit(String userName, Resource clusterResource,
   String nodePartition, SchedulingMode schedulingMode, boolean activeUser) 
{
 Resource partitionResource = labelManager.getResourceByLabel(nodePartition,
@@ -839,6 +854,11 @@ public class UsersManager implements AbstractUsersManager {
 try {
   this.writeLock.lock();
 
+  User userDesc = getUser(user);
+  if (userDesc != null && userDesc.getActiveApplications() <= 0) {
+return;
+  }
+
   Set<ApplicationId> userApps = usersApplications.get(user);
   if (userApps == null) {
 userApps = new HashSet<ApplicationId>();
@@ -893,7 +913,7 @@ public class UsersManager implements AbstractUsersManager {
 
   @Override
   public int getNumActiveUsers() {
-return activeUsers.get();
+return activeUsers.get() + activeUsersWithOnlyPendingApps.get();
   }
 
   float sumActiveUsersTimesWeights() {
@@ -1090,4 +1110,9 @@ public class UsersManager implements AbstractUsersManager 
{
   this.writeLock.unlock();
 }
   }
+
+  @VisibleForTesting
+  public int getNumActiveUsersWithOnlyPendingApps() {
+return activeUsersWithOnlyPendingApps.get();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/830ef12a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
 

[09/50] hadoop git commit: HDFS-13723. Occasional "Should be different group" error in TestRefreshUserMappings#testGroupMappingRefresh. Contributed by Siyao Meng.

2018-07-26 Thread eyang
HDFS-13723. Occasional "Should be different group" error in 
TestRefreshUserMappings#testGroupMappingRefresh. Contributed by Siyao Meng.

(cherry picked from commit 162228e8db937d4bdb9cf61d15ed555f1c96368f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9fa3cb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9fa3cb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9fa3cb1

Branch: refs/remotes/origin/branch-3.1
Commit: f9fa3cb1570af6497fdb51f47a457268fbea6bd5
Parents: 99e98bf
Author: Wei-Chiu Chuang 
Authored: Wed Jul 11 10:02:08 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed Jul 11 10:04:07 2018 -0700

--
 .../java/org/apache/hadoop/security/Groups.java  |  5 -
 .../hadoop/security/TestRefreshUserMappings.java | 19 +--
 2 files changed, 17 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9fa3cb1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index ad09865..63ec9a5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -73,7 +73,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class Groups {
-  private static final Logger LOG = LoggerFactory.getLogger(Groups.class);
+  @VisibleForTesting
+  static final Logger LOG = LoggerFactory.getLogger(Groups.class);
   
   private final GroupMappingServiceProvider impl;
 
@@ -308,6 +309,7 @@ public class Groups {
  */
 @Override
 public List load(String user) throws Exception {
+  LOG.debug("GroupCacheLoader - load.");
   TraceScope scope = null;
   Tracer tracer = Tracer.curThreadTracer();
   if (tracer != null) {
@@ -346,6 +348,7 @@ public class Groups {
 public ListenableFuture> reload(final String key,
  List oldValue)
 throws Exception {
+  LOG.debug("GroupCacheLoader - reload (async).");
   if (!reloadGroupsInBackground) {
 return super.reload(key, oldValue);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9fa3cb1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index f511eb1..0e7dfc3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.slf4j.event.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -93,6 +95,8 @@ public class TestRefreshUserMappings {
 FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
 cluster = new MiniDFSCluster.Builder(config).build();
 cluster.waitActive();
+
+GenericTestUtils.setLogLevel(Groups.LOG, Level.DEBUG);
   }
 
   @After
@@ -114,21 +118,24 @@ public class TestRefreshUserMappings {
 String [] args =  new String[]{"-refreshUserToGroupsMappings"};
 Groups groups = Groups.getUserToGroupsMappingService(config);
 String user = UserGroupInformation.getCurrentUser().getUserName();
-System.out.println("first attempt:");
+
+System.out.println("First attempt:");
 List g1 = groups.getGroups(user);
 String [] str_groups = new String [g1.size()];
 g1.toArray(str_groups);
 System.out.println(Arrays.toString(str_groups));
 
-System.out.println("second attempt, should be same:");
+System.out.println("Second attempt, should be the same:");
 List g2 = groups.getGroups(user);
 g2.toArray(str_groups);
 System.out.println(Arrays.toString(str_groups));
 for(int i=0; i g3 = groups.getGroups(user);
 g3.toArray(str_groups);
 

[50/50] hadoop git commit: HADOOP-15593. Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds. Contributed by Gabor Bota

2018-07-26 Thread eyang
HADOOP-15593.  Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds.
   Contributed by Gabor Bota

(cherry picked from commit 77721f39e26b630352a1f4087524a3fbd21ff06e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a869bd97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a869bd97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a869bd97

Branch: refs/remotes/origin/branch-3.1
Commit: a869bd970e832c4d770b3cee6257225260f4d235
Parents: 177f604
Author: Eric Yang 
Authored: Thu Jul 26 18:35:36 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:47:58 2018 -0400

--
 .../hadoop/security/UserGroupInformation.java   | 179 ---
 .../security/TestUserGroupInformation.java  |  38 
 2 files changed, 148 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a869bd97/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 33a876f..c44ef72 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -40,6 +40,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -850,81 +851,121 @@ public class UserGroupInformation {
 }
 
 //spawn thread only if we have kerb credentials
-Thread t = new Thread(new Runnable() {
+KerberosTicket tgt = getTGT();
+if (tgt == null) {
+  return;
+}
+String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+long nextRefresh = getRefreshTime(tgt);
+Thread t =
+new Thread(new AutoRenewalForUserCredsRunnable(tgt, cmd, nextRefresh));
+t.setDaemon(true);
+t.setName("TGT Renewer for " + getUserName());
+t.start();
+  }
+
+  @VisibleForTesting
+  class AutoRenewalForUserCredsRunnable implements Runnable {
+private KerberosTicket tgt;
+private RetryPolicy rp;
+private String kinitCmd;
+private long nextRefresh;
+private boolean runRenewalLoop = true;
+
+AutoRenewalForUserCredsRunnable(KerberosTicket tgt, String kinitCmd,
+long nextRefresh){
+  this.tgt = tgt;
+  this.kinitCmd = kinitCmd;
+  this.nextRefresh = nextRefresh;
+  this.rp = null;
+}
+
+public void setRunRenewalLoop(boolean runRenewalLoop) {
+  this.runRenewalLoop = runRenewalLoop;
+}
 
-  @Override
-  public void run() {
-String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
-KerberosTicket tgt = getTGT();
-if (tgt == null) {
+@Override
+public void run() {
+  do {
+try {
+  long now = Time.now();
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Current time is " + now);
+LOG.debug("Next refresh is " + nextRefresh);
+  }
+  if (now < nextRefresh) {
+Thread.sleep(nextRefresh - now);
+  }
+  String output = Shell.execCommand(kinitCmd, "-R");
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Renewed ticket. kinit output: {}", output);
+  }
+  reloginFromTicketCache();
+  tgt = getTGT();
+  if (tgt == null) {
+LOG.warn("No TGT after renewal. Aborting renew thread for " +
+getUserName());
+return;
+  }
+  nextRefresh = Math.max(getRefreshTime(tgt),
+  now + kerberosMinSecondsBeforeRelogin);
+  metrics.renewalFailures.set(0);
+  rp = null;
+} catch (InterruptedException ie) {
+  LOG.warn("Terminating renewal thread");
   return;
-}
-long nextRefresh = getRefreshTime(tgt);
-RetryPolicy rp = null;
-while (true) {
+} catch (IOException ie) {
+  metrics.renewalFailuresTotal.incr();
+  final long now = Time.now();
+
+  if (tgt.isDestroyed()) {
+LOG.error("TGT is destroyed. Aborting renew thread for {}.",
+getUserName());
+return;
+  }
+
+  long tgtEndTime;
+  // As described in HADOOP-15593 we need to handle the case when
+  // tgt.getEndTime() throws NPE because of JDK issue JDK-8147772
+  // NPE is only possible if 

[28/50] hadoop git commit: HADOOP-15547/ WASB: improve listStatus performance. Contributed by Thomas Marquardt.

2018-07-26 Thread eyang
HADOOP-15547/ WASB: improve listStatus performance.
Contributed by Thomas Marquardt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/749fff57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/749fff57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/749fff57

Branch: refs/remotes/origin/branch-3.1
Commit: 749fff577ed9afb4ef8a54b8948f74be083cc620
Parents: 76b8beb
Author: Steve Loughran 
Authored: Thu Jul 19 12:29:21 2018 -0700
Committer: Steve Loughran 
Committed: Thu Jul 19 12:29:21 2018 -0700

--
 .../dev-support/findbugs-exclude.xml|  10 +
 hadoop-tools/hadoop-azure/pom.xml   |  12 +
 .../fs/azure/AzureNativeFileSystemStore.java| 182 -
 .../apache/hadoop/fs/azure/FileMetadata.java|  77 ++--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 376 ---
 .../hadoop/fs/azure/NativeFileSystemStore.java  |  15 +-
 .../apache/hadoop/fs/azure/PartialListing.java  |  61 ---
 .../hadoop/fs/azure/ITestListPerformance.java   | 196 ++
 8 files changed, 514 insertions(+), 415 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/749fff57/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
index cde1734..38de35e 100644
--- a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
@@ -47,4 +47,14 @@


  
+
+
+
+
+
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/749fff57/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index 978e89e..7265df9 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -43,6 +43,8 @@
 
unset
 
 7200
+
10
+
1000
   
 
   
@@ -298,6 +300,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
 **/Test*.java
@@ -326,6 +330,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
 **/TestRollingWindowAverage*.java
@@ -367,6 +373,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
   
@@ -412,6 +420,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
 
**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
@@ -454,6 +464,8 @@
 
${fs.azure.scale.test.enabled}
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
${fs.azure.scale.test.timeout}
   false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/749fff57/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 9396a51..74bb035 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 

[42/50] hadoop git commit: HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera)

2018-07-26 Thread eyang
HADOOP-15612. Improve exception when tfile fails to load LzoCodec. (gera)

(cherry picked from commit 6bec03cfc8bdcf6aa3df9c22231ab959ba31f2f5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1396fa2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1396fa2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1396fa2a

Branch: refs/remotes/origin/branch-3.1
Commit: 1396fa2a2727b8a678628d59768b355845eeb9a9
Parents: 40c06b3
Author: Gera Shegalov 
Authored: Tue Jul 17 00:05:39 2018 -0700
Committer: Gera Shegalov 
Committed: Tue Jul 24 23:05:34 2018 -0700

--
 .../hadoop/io/file/tfile/Compression.java   | 31 +++---
 .../hadoop/io/file/tfile/TestCompression.java   | 34 +++-
 2 files changed, 53 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1396fa2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
index fa85ed7..c4347e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
@@ -5,9 +5,9 @@
  * licenses this file to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
- * 
+ *
  * http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
@@ -24,6 +24,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -78,25 +79,33 @@ public final class Compression {
   public enum Algorithm {
 LZO(TFile.COMPRESSION_LZO) {
   private transient boolean checked = false;
+  private transient ClassNotFoundException cnf;
+  private transient boolean reinitCodecInTests;
   private static final String defaultClazz =
   "org.apache.hadoop.io.compress.LzoCodec";
+  private transient String clazz;
   private transient CompressionCodec codec = null;
 
+  private String getLzoCodecClass() {
+String extClazzConf = conf.get(CONF_LZO_CLASS);
+String extClazz = (extClazzConf != null) ?
+extClazzConf : System.getProperty(CONF_LZO_CLASS);
+return (extClazz != null) ? extClazz : defaultClazz;
+  }
+
   @Override
   public synchronized boolean isSupported() {
-if (!checked) {
+if (!checked || reinitCodecInTests) {
   checked = true;
-  String extClazzConf = conf.get(CONF_LZO_CLASS);
-  String extClazz = (extClazzConf != null) ?
-  extClazzConf : System.getProperty(CONF_LZO_CLASS);
-  String clazz = (extClazz != null) ? extClazz : defaultClazz;
+  reinitCodecInTests = conf.getBoolean("test.reload.lzo.codec", false);
+  clazz = getLzoCodecClass();
   try {
 LOG.info("Trying to load Lzo codec class: " + clazz);
 codec =
 (CompressionCodec) ReflectionUtils.newInstance(Class
 .forName(clazz), conf);
   } catch (ClassNotFoundException e) {
-// that is okay
+cnf = e;
   }
 }
 return codec != null;
@@ -105,9 +114,9 @@ public final class Compression {
   @Override
   CompressionCodec getCodec() throws IOException {
 if (!isSupported()) {
-  throw new IOException(
-  "LZO codec class not specified. Did you forget to set property "
-  + CONF_LZO_CLASS + "?");
+  throw new IOException(String.format(
+  "LZO codec %s=%s could not be loaded", CONF_LZO_CLASS, clazz),
+  cnf);
 }
 
 return codec;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1396fa2a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestCompression.java
--
diff --git 

[07/50] hadoop git commit: HADOOP-15541. [s3a] Shouldn't try to drain stream before aborting connection in case of timeout. Contributed by Sean Mackrory.

2018-07-26 Thread eyang
HADOOP-15541. [s3a] Shouldn't try to drain stream before aborting
connection in case of timeout. Contributed by Sean Mackrory.

(cherry picked from commit d503f65b6689b19278ec2a0cf9da5a8762539de8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/caf38532
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/caf38532
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/caf38532

Branch: refs/remotes/origin/branch-3.1
Commit: caf38532f3f3eafb4c874a6debddaad2fb2aa201
Parents: 2aaad40
Author: Steve Loughran 
Authored: Wed Jul 11 14:55:11 2018 +0100
Committer: Steve Loughran 
Committed: Wed Jul 11 14:55:11 2018 +0100

--
 .../apache/hadoop/fs/s3a/S3AInputStream.java| 24 +---
 1 file changed, 16 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/caf38532/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
index c54d3e26..91a2d9d 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInputStream.java
@@ -36,6 +36,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
 import java.io.IOException;
+import java.net.SocketTimeoutException;
 
 import static org.apache.commons.lang3.StringUtils.isNotEmpty;
 
@@ -155,11 +156,11 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
* @throws IOException on any failure to open the object
*/
   @Retries.OnceTranslated
-  private synchronized void reopen(String reason, long targetPos, long length)
-  throws IOException {
+  private synchronized void reopen(String reason, long targetPos, long length,
+  boolean forceAbort) throws IOException {
 
 if (wrappedStream != null) {
-  closeStream("reopen(" + reason + ")", contentRangeFinish, false);
+  closeStream("reopen(" + reason + ")", contentRangeFinish, forceAbort);
 }
 
 contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos,
@@ -324,7 +325,7 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
 
   //re-open at specific location if needed
   if (wrappedStream == null) {
-reopen("read from new offset", targetPos, len);
+reopen("read from new offset", targetPos, len, false);
   }
 });
   }
@@ -367,8 +368,11 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
 b = wrappedStream.read();
   } catch (EOFException e) {
 return -1;
+  } catch (SocketTimeoutException e) {
+onReadFailure(e, 1, true);
+b = wrappedStream.read();
   } catch (IOException e) {
-onReadFailure(e, 1);
+onReadFailure(e, 1, false);
 b = wrappedStream.read();
   }
   return b;
@@ -393,12 +397,13 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
* @throws IOException any exception thrown on the re-open attempt.
*/
   @Retries.OnceTranslated
-  private void onReadFailure(IOException ioe, int length) throws IOException {
+  private void onReadFailure(IOException ioe, int length, boolean forceAbort)
+  throws IOException {
 
 LOG.info("Got exception while trying to read from stream {}" +
 " trying to recover: " + ioe, uri);
 streamStatistics.readException();
-reopen("failure recovery", pos, length);
+reopen("failure recovery", pos, length, forceAbort);
   }
 
   /**
@@ -446,8 +451,11 @@ public class S3AInputStream extends FSInputStream 
implements CanSetReadahead {
   } catch (EOFException e) {
 // the base implementation swallows EOFs.
 return -1;
+  } catch (SocketTimeoutException e) {
+onReadFailure(e, len, true);
+bytes = wrappedStream.read(buf, off, len);
   } catch (IOException e) {
-onReadFailure(e, len);
+onReadFailure(e, len, false);
 bytes= wrappedStream.read(buf, off, len);
   }
   return bytes;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[40/50] hadoop git commit: HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica (Contributed by BELUGA BEHR via Daniel Templeton)

2018-07-26 Thread eyang
HDFS-13448. HDFS Block Placement - Ignore Locality for First Block Replica
(Contributed by BELUGA BEHR via Daniel Templeton)

Change-Id: I965d1cfa642ad24296038b83e3d5c9983545267d
(cherry picked from commit 849c45db187224095b13fe297a4d7377fbb9d2cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00c476ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00c476ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00c476ab

Branch: refs/remotes/origin/branch-3.1
Commit: 00c476abd8f1d34414b646219856859477558458
Parents: 7e7792d
Author: Daniel Templeton 
Authored: Tue Jul 24 15:34:19 2018 -0700
Committer: Daniel Templeton 
Committed: Tue Jul 24 16:12:43 2018 -0700

--
 .../java/org/apache/hadoop/fs/CreateFlag.java   |  9 ++-
 .../org/apache/hadoop/hdfs/AddBlockFlag.java| 11 ++-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  3 +
 .../hadoop/hdfs/DistributedFileSystem.java  | 11 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../BlockPlacementPolicyDefault.java|  4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 30 +---
 .../server/namenode/TestFSDirWriteFileOp.java   | 79 
 8 files changed, 134 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 383d65a..c3e088b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -116,7 +116,14 @@ public enum CreateFlag {
* Enforce the file to be a replicated file, no matter what its parent
* directory's replication or erasure coding policy is.
*/
-  SHOULD_REPLICATE((short) 0x80);
+  SHOULD_REPLICATE((short) 0x80),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x100);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
index 6a0805b..b0686d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
@@ -36,7 +36,16 @@ public enum AddBlockFlag {
*
* @see CreateFlag#NO_LOCAL_WRITE
*/
-  NO_LOCAL_WRITE((short) 0x01);
+  NO_LOCAL_WRITE((short) 0x01),
+
+  /**
+   * Advise that the first block replica NOT take into account DataNode
+   * locality. The first block replica should be placed randomly within the
+   * cluster. Subsequent block replicas should follow DataNode locality rules.
+   *
+   * @see CreateFlag#IGNORE_CLIENT_LOCALITY
+   */
+  IGNORE_CLIENT_LOCALITY((short) 0x02);
 
   private final short mode;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00c476ab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 9734752..e977054 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -201,6 +201,9 @@ public class DFSOutputStream extends FSOutputSummer
 if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) {
   this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE);
 }
+if (flag.contains(CreateFlag.IGNORE_CLIENT_LOCALITY)) {
+  this.addBlockFlags.add(AddBlockFlag.IGNORE_CLIENT_LOCALITY);
+}
 if (progress != null) {
   DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
   +"{}", src);


[18/50] hadoop git commit: HADOOP-15598. DataChecksum calculate checksum is contented on hashtable synchronization. Contributed by Prasanth Jayachandran.

2018-07-26 Thread eyang
HADOOP-15598. DataChecksum calculate checksum is contented on hashtable 
synchronization. Contributed by Prasanth Jayachandran.

(cherry picked from commit 0c7a578927032d5d1ef3469283d7d1fb7dee2a56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34f1dd03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34f1dd03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34f1dd03

Branch: refs/remotes/origin/branch-3.1
Commit: 34f1dd03ee0c31173eb3a0b88a5fad7796a30c24
Parents: 44beab0
Author: Wei-Chiu Chuang 
Authored: Mon Jul 16 11:32:45 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Jul 16 11:33:43 2018 -0700

--
 .../src/main/java/org/apache/hadoop/util/NativeCrc32.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34f1dd03/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
index 0669b0a..3142df2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java
@@ -28,12 +28,12 @@ import com.google.common.annotations.VisibleForTesting;
  * natively.
  */
 class NativeCrc32 {
-  
+  private static final boolean isSparc = 
System.getProperty("os.arch").toLowerCase().startsWith("sparc");
   /**
* Return true if the JNI-based native CRC extensions are available.
*/
   public static boolean isAvailable() {
-if (System.getProperty("os.arch").toLowerCase().startsWith("sparc")) {
+if (isSparc) {
   return false;
 } else {
   return NativeCodeLoader.isNativeCodeLoaded();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] hadoop git commit: Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via rkanter)

2018-07-26 Thread eyang
Only mount non-empty directories for cgroups (miklos.szeg...@cloudera.com via 
rkanter)

(cherry picked from commit 0838fe833738e04f5e6f6408e97866d77bebbf30)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c7d9163
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c7d9163
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c7d9163

Branch: refs/remotes/origin/branch-3.1
Commit: 1c7d916347d1c68ad32b592d764890b40b66e558
Parents: 27e2b4b
Author: Robert Kanter 
Authored: Mon Jul 9 10:37:20 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:07:48 2018 -0700

--
 .../impl/container-executor.c   | 30 +++-
 .../test/test-container-executor.c  | 20 +
 2 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7d9163/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index baf0e8b..eff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -2379,6 +2379,28 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
   free(path_tmp);
 }
 
+int is_empty(char *target_dir) {
+  DIR *dir = NULL;
+  struct dirent *entry = NULL;
+  dir = opendir(target_dir);
+  if (!dir) {
+fprintf(LOGFILE, "Could not open directory %s - %s\n", target_dir,
+strerror(errno));
+return 0;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+if (strcmp(entry->d_name, ".") == 0) {
+  continue;
+}
+if (strcmp(entry->d_name, "..") == 0) {
+  continue;
+}
+fprintf(LOGFILE, "Directory is not empty %s\n", target_dir);
+return 0;
+  }
+  return 1;
+}
+
 /**
  * Mount a cgroup controller at the requested mount point and create
  * a hierarchy for the Hadoop NodeManager to manage.
@@ -2413,7 +2435,13 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
 result = -1;
   } else {
 if (strstr(mount_path, "..") != NULL) {
-  fprintf(LOGFILE, "Unsupported cgroup mount path detected.\n");
+  fprintf(LOGFILE, "Unsupported cgroup mount path detected. %s\n",
+  mount_path);
+  result = INVALID_COMMAND_PROVIDED;
+  goto cleanup;
+}
+if (!is_empty(mount_path)) {
+  fprintf(LOGFILE, "cgroup mount path is not empty. %s\n", mount_path);
   result = INVALID_COMMAND_PROVIDED;
   goto cleanup;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7d9163/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 3d32883..a199d84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,6 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+void test_is_empty() {
+  printf("\nTesting is_empty function\n");
+  if (is_empty("/")) {
+printf("FAIL: / should not be empty\n");
+exit(1);
+  }
+  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+exit(1);
+  }
+  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
+  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
+printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+exit(1);
+  }
+}
+
 // This test is expected to be executed either by a regular
 // user or by root. If executed by a regular user it doesn't
 // test all the functions that would depend on changing the
@@ -1264,6 +1281,9 @@ int main(int 

[37/50] hadoop git commit: YARN-8548. AllocateResponse proto setNMTokens initBuilder not done. Contributed by Bilwa S T.

2018-07-26 Thread eyang
YARN-8548. AllocateResponse proto setNMTokens initBuilder not done. Contributed 
by Bilwa S T.

(cherry picked from commit ff7c2eda34c2c40ad71b50df6462a661bd213fbd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a684a2ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a684a2ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a684a2ef

Branch: refs/remotes/origin/branch-3.1
Commit: a684a2efb855e1933b0d808363c3c1fe69778867
Parents: 0710107
Author: bibinchundatt 
Authored: Tue Jul 24 16:17:20 2018 +0530
Committer: bibinchundatt 
Committed: Tue Jul 24 16:30:31 2018 +0530

--
 .../impl/pb/AllocateResponsePBImpl.java |  1 +
 .../resourcemanager/recovery/TestProtos.java| 20 
 2 files changed, 21 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a684a2ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
index 3ab5563..8df56b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -347,6 +347,7 @@ public class AllocateResponsePBImpl extends 
AllocateResponse {
 
   @Override
   public synchronized void setNMTokens(List nmTokens) {
+maybeInitBuilder();
 if (nmTokens == null || nmTokens.isEmpty()) {
   if (this.nmTokens != null) {
 this.nmTokens.clear();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a684a2ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
index cc96412..d42b411 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestProtos.java
@@ -18,7 +18,15 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.recovery;
 
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
+import org.apache.hadoop.yarn.api.records.NMToken;
 import 
org.apache.hadoop.yarn.proto.YarnServerResourceManagerRecoveryProtos.EpochProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -33,4 +41,16 @@ public class TestProtos {
 String protoString = proto.toString();
 Assert.assertNotNull(protoString);
   }
+
+  @Test
+  public void testProtoAllocateResponse() {
+AllocateResponseProto proto = AllocateResponseProto.getDefaultInstance();
+AllocateResponsePBImpl alloc = new AllocateResponsePBImpl(proto);
+List nmTokens = new ArrayList();
+try {
+  alloc.setNMTokens(nmTokens);
+} catch (Exception ex) {
+  fail();
+}
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[36/50] hadoop git commit: YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. Contributed by Bibin A Chundatt.

2018-07-26 Thread eyang
YARN-8544. [DS] AM registration fails when hadoop authorization is enabled. 
Contributed by Bibin A Chundatt.

(cherry picked from commit 84612788339392fcda1aef0e27c43f5c6b2a19e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0710107f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0710107f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0710107f

Branch: refs/remotes/origin/branch-3.1
Commit: 0710107f8d931d63627d356d6100885696cc8736
Parents: 5aca058
Author: bibinchundatt 
Authored: Tue Jul 24 13:09:17 2018 +0530
Committer: bibinchundatt 
Committed: Tue Jul 24 13:11:31 2018 +0530

--
 .../src/main/conf/hadoop-policy.xml | 20 
 .../dev-support/findbugs-exclude.xml|  4 
 .../hadoop/yarn/conf/YarnConfiguration.java |  7 ++
 .../yarn/conf/TestYarnConfigurationFields.java  |  4 
 .../nodemanager/amrmproxy/AMRMProxyService.java |  8 +++
 .../collectormanager/NMCollectorService.java|  2 +-
 .../containermanager/ContainerManagerImpl.java  |  2 +-
 .../localizer/ResourceLocalizationService.java  |  2 +-
 .../security/authorize/NMPolicyProvider.java| 25 ++--
 .../security/authorize/RMPolicyProvider.java|  3 +++
 10 files changed, 72 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0710107f/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
index cf3dd1f..bd7c111 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
@@ -242,4 +242,24 @@
 group list is separated by a blank. For e.g. "alice,bob users,wheel".
 A special value of "*" means all users are allowed.
   
+
+  
+
security.applicationmaster-nodemanager.applicationmaster.protocol.acl
+*
+ACL for ApplicationMasterProtocol, used by the Nodemanager
+and ApplicationMasters to communicate.
+The ACL is a comma-separated list of user and group names. The user and
+group list is separated by a blank. For e.g. "alice,bob users,wheel".
+A special value of "*" means all users are allowed.
+  
+
+  
+security.distributedscheduling.protocol.acl
+*
+ACL for DistributedSchedulingAMProtocol, used by the 
Nodemanager
+and Resourcemanager to communicate.
+The ACL is a comma-separated list of user and group names. The user and
+group list is separated by a blank. For e.g. "alice,bob users,wheel".
+A special value of "*" means all users are allowed.
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0710107f/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5841361..7d40c70 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -461,6 +461,10 @@
   
   
 
+  
+
+
+  
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0710107f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 586fabf..f7fd4fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2202,6 +2202,9 @@ public class YarnConfiguration extends Configuration {
   public static final String 
   YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL =
   "security.applicationmaster.protocol.acl";
+  public static final String
+  YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL =
+  "security.distributedscheduling.protocol.acl";
 
   public static final String 
   YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL =
@@ -2218,6 +2221,10 @@ public class YarnConfiguration extends Configuration {
   YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL =
   

[03/50] hadoop git commit: HADOOP-15384. distcp numListstatusThreads option doesn't get to -delete scan. Contributed by Steve Loughran.

2018-07-26 Thread eyang
HADOOP-15384. distcp numListstatusThreads option doesn't get to -delete scan.
Contributed by Steve Loughran.

(cherry picked from commit ca8b80bf59c0570bb9172208d3a6c993a6854514)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d54241e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d54241e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d54241e9

Branch: refs/remotes/origin/branch-3.1
Commit: d54241e9c995c8f63e6a6317599b858f486763ce
Parents: 8ad82ea
Author: Steve Loughran 
Authored: Tue Jul 10 10:50:40 2018 +0100
Committer: Steve Loughran 
Committed: Tue Jul 10 10:50:40 2018 +0100

--
 .../java/org/apache/hadoop/tools/DistCpOptions.java|  5 -
 .../org/apache/hadoop/tools/mapred/CopyCommitter.java  | 13 +++--
 .../tools/contract/AbstractContractDistCpTest.java |  2 +-
 3 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d54241e9/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index ea99016..cff04eb 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -387,7 +387,10 @@ public final class DistCpOptions {
   DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.TRACK_MISSING,
   String.valueOf(trackPath));
 }
-
+if (numListstatusThreads > 0) {
+  DistCpOptionSwitch.addToConf(conf, 
DistCpOptionSwitch.NUM_LISTSTATUS_THREADS,
+  Integer.toString(numListstatusThreads));
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d54241e9/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 07eacb0..38106fa 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -392,6 +392,9 @@ public class CopyCommitter extends FileOutputCommitter {
 Path sourceListing = new 
Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
 FileSystem clusterFS = sourceListing.getFileSystem(conf);
 Path sortedSourceListing = DistCpUtils.sortListing(conf, sourceListing);
+long sourceListingCompleted = System.currentTimeMillis();
+LOG.info("Source listing completed in {}",
+formatDuration(sourceListingCompleted - listingStart));
 
 // Similarly, create the listing of target-files. Sort alphabetically.
 Path targetListing = new Path(sourceListing.getParent(), 
"targetListing.seq");
@@ -409,8 +412,8 @@ public class CopyCommitter extends FileOutputCommitter {
 // Walk both source and target file listings.
 // Delete all from target that doesn't also exist on source.
 long deletionStart = System.currentTimeMillis();
-LOG.info("Listing completed in {}",
-formatDuration(deletionStart - listingStart));
+LOG.info("Destination listing completed in {}",
+formatDuration(deletionStart - sourceListingCompleted));
 
 long deletedEntries = 0;
 long filesDeleted = 0;
@@ -545,9 +548,15 @@ public class CopyCommitter extends FileOutputCommitter {
 // Set up options to be the same from the CopyListing.buildListing's
 // perspective, so to collect similar listings as when doing the copy
 //
+// thread count is picked up from the job
+int threads = conf.getInt(DistCpConstants.CONF_LABEL_LISTSTATUS_THREADS,
+DistCpConstants.DEFAULT_LISTSTATUS_THREADS);
+LOG.info("Scanning destination directory {} with thread count: {}",
+targetFinalPath, threads);
 DistCpOptions options = new DistCpOptions.Builder(targets, resultNonePath)
 .withOverwrite(overwrite)
 .withSyncFolder(syncFolder)
+.withNumListstatusThreads(threads)
 .build();
 DistCpContext distCpContext = new DistCpContext(options);
 distCpContext.setTargetPathExists(targetPathExists);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d54241e9/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java
--
diff 

[47/50] hadoop git commit: HDFS-11060. make DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED configurable. Contributed by Lantao Jin.

2018-07-26 Thread eyang
HDFS-11060. make DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED configurable. 
Contributed by Lantao Jin.

(cherry picked from commit e95c5e9f62452ee848875ec2f8642eab4992cd23)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/964f3454
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/964f3454
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/964f3454

Branch: refs/remotes/origin/branch-3.1
Commit: 964f3454d1237fa25aa828504612b4a11d8e76ff
Parents: 830ef12
Author: Wei-Chiu Chuang 
Authored: Wed Jul 25 11:04:18 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed Jul 25 11:05:18 2018 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 3 +++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 8 ++--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 .../hdfs/server/namenode/TestListCorruptFileBlocks.java | 2 +-
 4 files changed, 19 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/964f3454/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index dde7eb7..ea3abb1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -238,6 +238,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_DEFAULT
   = 1;
 
+  public static final String  
DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_KEY = 
"dfs.namenode.max-corrupt-file-blocks-returned";
+  public static final int 
DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_DEFAULT = 100;
+
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/964f3454/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 19ff08d..2098252 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -425,7 +425,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   public static final Log auditLog = LogFactory.getLog(
   FSNamesystem.class.getName() + ".audit");
 
-  static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100;
+  private final int maxCorruptFileBlocksReturn;
   static int BLOCK_DELETION_INCREMENT = 1000;
   private final boolean isPermissionEnabled;
   private final UserGroupInformation fsOwner;
@@ -831,6 +831,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_KEY,
   DFSConfigKeys.DFS_NAMENODE_FILE_CLOSE_NUM_COMMITTED_ALLOWED_DEFAULT);
 
+  this.maxCorruptFileBlocksReturn = conf.getInt(
+  DFSConfigKeys.DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_KEY,
+  DFSConfigKeys.DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_DEFAULT);
+
   this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
   
   this.standbyShouldCheckpoint = conf.getBoolean(
@@ -5497,7 +5501,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   if (src.startsWith(path)){
 corruptFiles.add(new CorruptFileBlockInfo(src, blk));
 count++;
-if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED)
+if (count >= maxCorruptFileBlocksReturn)
   break;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/964f3454/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c092bff..4bdeef6 100644
--- 

[06/50] hadoop git commit: YARN-8491. TestServiceCLI#testEnableFastLaunch fail when umask is 077. Contributed by K G Bakthavachalam.

2018-07-26 Thread eyang
YARN-8491. TestServiceCLI#testEnableFastLaunch fail when umask is 077. 
Contributed by K G Bakthavachalam.

(cherry picked from commit 52e1bc8539ce769f47743d8b2d318a54c3887ba0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2aaad400
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2aaad400
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2aaad400

Branch: refs/remotes/origin/branch-3.1
Commit: 2aaad4000ae2e7a33fb0753f51b89dfd3763f519
Parents: 9b4ead9
Author: bibinchundatt 
Authored: Wed Jul 11 16:19:51 2018 +0530
Committer: bibinchundatt 
Committed: Wed Jul 11 16:24:37 2018 +0530

--
 .../org/apache/hadoop/yarn/service/client/TestServiceCLI.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aaad400/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
index 78a8198..363fe91 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/client/TestServiceCLI.java
@@ -121,12 +121,16 @@ public class TestServiceCLI {
 basedir = new File("target", "apps");
 basedirProp = YARN_SERVICE_BASE_PATH + "=" + basedir.getAbsolutePath();
 conf.set(YARN_SERVICE_BASE_PATH, basedir.getAbsolutePath());
+fs = new SliderFileSystem(conf);
 dependencyTarGzBaseDir = tmpFolder.getRoot();
+fs.getFileSystem()
+.setPermission(new Path(dependencyTarGzBaseDir.getAbsolutePath()),
+new FsPermission("755"));
 dependencyTarGz = getDependencyTarGz(dependencyTarGzBaseDir);
 dependencyTarGzProp = DEPENDENCY_TARBALL_PATH + "=" + dependencyTarGz
 .toString();
 conf.set(DEPENDENCY_TARBALL_PATH, dependencyTarGz.toString());
-fs = new SliderFileSystem(conf);
+
 if (basedir.exists()) {
   FileUtils.deleteDirectory(basedir);
 } else {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[24/50] hadoop git commit: Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

2018-07-26 Thread eyang
Disable mounting cgroups by default (miklos.szeg...@cloudera.com via rkanter)

(cherry picked from commit 351cf87c92872d90f62c476f85ae4d02e485769c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27e2b4b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27e2b4b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27e2b4b3

Branch: refs/remotes/origin/branch-3.1
Commit: 27e2b4b36456ea5f42d38329dcc6bee0cb7b7ac0
Parents: d82edec
Author: Robert Kanter 
Authored: Thu Jun 7 17:09:34 2018 -0700
Committer: Robert Kanter 
Committed: Wed Jul 18 16:07:48 2018 -0700

--
 .../impl/container-executor.c   | 54 ++--
 .../impl/container-executor.h   |  4 ++
 .../main/native/container-executor/impl/main.c  | 19 ---
 .../src/site/markdown/NodeManagerCgroups.md |  2 +-
 4 files changed, 55 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27e2b4b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1b8842a..baf0e8b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -73,6 +73,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", 
"mapred", "hdfs", "bin", 0}
 
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
+static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
 
 static const char* PROC_PATH = "/proc";
 
@@ -482,6 +483,12 @@ int is_tc_support_enabled() {
 DEFAULT_TC_SUPPORT_ENABLED, _cfg);
 }
 
+int is_mount_cgroups_support_enabled() {
+return is_feature_enabled(MOUNT_CGROUP_SUPPORT_ENABLED_KEY,
+  DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED,
+  _cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -2346,20 +2353,25 @@ void chown_dir_contents(const char *dir_path, uid_t 
uid, gid_t gid) {
   DIR *dp;
   struct dirent *ep;
 
-  char *path_tmp = malloc(strlen(dir_path) + NAME_MAX + 2);
+  size_t len = strlen(dir_path) + NAME_MAX + 2;
+  char *path_tmp = malloc(len);
   if (path_tmp == NULL) {
 return;
   }
 
-  char *buf = stpncpy(path_tmp, dir_path, strlen(dir_path));
-  *buf++ = '/';
-
   dp = opendir(dir_path);
   if (dp != NULL) {
 while ((ep = readdir(dp)) != NULL) {
-  stpncpy(buf, ep->d_name, strlen(ep->d_name));
-  buf[strlen(ep->d_name)] = '\0';
-  change_owner(path_tmp, uid, gid);
+  if (strcmp(ep->d_name, ".") != 0 &&
+  strcmp(ep->d_name, "..") != 0 &&
+  strstr(ep->d_name, "..") == NULL) {
+int result = snprintf(path_tmp, len, "%s/%s", dir_path, ep->d_name);
+if (result > 0 && result < len) {
+  change_owner(path_tmp, uid, gid);
+} else {
+  fprintf(LOGFILE, "Ignored %s/%s due to length", dir_path, 
ep->d_name);
+}
+  }
 }
 closedir(dp);
   }
@@ -2383,11 +2395,16 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   char *mount_path = malloc(len);
   char hier_path[EXECUTOR_PATH_MAX];
   int result = 0;
-  struct stat sb;
 
   if (controller == NULL || mount_path == NULL) {
 fprintf(LOGFILE, "Failed to mount cgroup controller; not enough memory\n");
 result = OUT_OF_MEMORY;
+goto cleanup;
+  }
+  if (hierarchy == NULL || strstr(hierarchy, "..") != NULL) {
+fprintf(LOGFILE, "Unsupported cgroup hierarhy path detected.\n");
+result = INVALID_COMMAND_PROVIDED;
+goto cleanup;
   }
   if (get_kv_key(pair, controller, len) < 0 ||
   get_kv_value(pair, mount_path, len) < 0) {
@@ -2395,13 +2412,10 @@ int mount_cgroup(const char *pair, const char 
*hierarchy) {
   pair);
 result = -1;
   } else {
-if (stat(mount_path, ) != 0) {
-  // Create mount point, if it does not exist
-  const mode_t mount_perms = S_IRWXU | S_IRGRP | S_IXGRP;
-  if (mkdirs(mount_path, mount_perms) == 0) {
-fprintf(LOGFILE, "Failed to create cgroup mount point %s at %s\n",
-  controller, mount_path);
-  }
+if (strstr(mount_path, "..") != NULL) {
+  

[43/50] hadoop git commit: YARN-8546. Resource leak caused by a reserved container being released more than once under async scheduling. Contributed by Tao Yang.

2018-07-26 Thread eyang
YARN-8546. Resource leak caused by a reserved container being released more 
than once under async scheduling. Contributed by Tao Yang.

(Cherry-picked from commit 5be9f4a5d05c9cb99348719fe35626b1de3055db)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b89624a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b89624a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b89624a9

Branch: refs/remotes/origin/branch-3.1
Commit: b89624a943268e180e0e1532b3a394ff580a962c
Parents: 1396fa2
Author: Weiwei Yang 
Authored: Wed Jul 25 17:35:27 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Jul 25 17:53:40 2018 +0800

--
 .../scheduler/common/fica/FiCaSchedulerApp.java | 15 
 .../TestCapacitySchedulerAsyncScheduling.java   | 89 
 2 files changed, 104 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b89624a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 3b1b82c..9810e98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -361,6 +361,21 @@ public class FiCaSchedulerApp extends 
SchedulerApplicationAttempt {
 .isEmpty()) {
   for (SchedulerContainer
   releaseContainer : allocation.getToRelease()) {
+// Make sure to-release reserved containers are not outdated
+if (releaseContainer.getRmContainer().getState()
+== RMContainerState.RESERVED
+&& releaseContainer.getRmContainer() != releaseContainer
+.getSchedulerNode().getReservedContainer()) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Failed to accept this proposal because "
++ "it tries to release an outdated reserved container "
++ releaseContainer.getRmContainer().getContainerId()
++ " on node " + releaseContainer.getSchedulerNode().getNodeID()
++ " whose reserved container is "
++ releaseContainer.getSchedulerNode().getReservedContainer());
+  }
+  return false;
+}
 // Only consider non-reserved container (reserved container will
 // not affect available resource of node) on the same node
 if (releaseContainer.getRmContainer().getState()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b89624a9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
index 338b9f9..c2c1519 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 

[32/50] hadoop git commit: YARN-8301. Added YARN service upgrade instructions. Contributed by Chandni Singh

2018-07-26 Thread eyang
YARN-8301.  Added YARN service upgrade instructions.
Contributed by Chandni Singh

(cherry picked from commit 10014a4d88f239d3c072e51bc0739cba1fca9406)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f2a129f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f2a129f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f2a129f

Branch: refs/remotes/origin/branch-3.1
Commit: 4f2a129f2e2326ca28659d93b412cf8649ed5025
Parents: 004e1f2
Author: Eric Yang 
Authored: Fri Jul 20 19:46:35 2018 -0400
Committer: Eric Yang 
Committed: Fri Jul 20 19:48:19 2018 -0400

--
 .../src/site/markdown/yarn-service/Overview.md  |   4 +-
 .../markdown/yarn-service/ServiceUpgrade.md | 197 +++
 2 files changed, 198 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f2a129f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
index 8e2bf9a..041b0ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/Overview.md
@@ -56,6 +56,4 @@ The benefits of combining these workloads are two-fold:
 * [Registry DNS](RegistryDNS.html): Deep dives into the Registry DNS internals.
 * [Examples](Examples.html): List some example service definitions 
(`Yarnfile`).
 * [Configurations](Configurations.html): Describes how to configure the custom 
services on YARN.
-
-
- 
+* [Service Upgrade](ServiceUpgrade.html): Describes how to upgrade a YARN 
service which is an experimental feature.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f2a129f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md
new file mode 100644
index 000..839be22
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/yarn-service/ServiceUpgrade.md
@@ -0,0 +1,197 @@
+
+
+# Service Upgrade (Experimental Feature - Tech Preview)
+
+Yarn service provides a way of upgrading/downgrading long-running applications 
without
+shutting down the application to minimize the downtime during this process. 
This is
+an experimental feature which is currently not enabled by default.
+
+## Overview
+
+Upgrading a Yarn Service is a 3-step (or 2-step, when auto-finalization of
+upgrade is chosen) process:
+
+1. Initiate service upgrade.\
+This step involves providing the service spec of the newer version of the 
service.
+Once the service upgrade is initiated, the state of the service is changed to
+`UPGRADING`.
+
+2. Upgrade component instances.\
+This step involves triggering upgrade of individual component instance.
+By providing an API to upgrade at instance level, users can orchestrate upgrade
+of the entire service in any order which is relevant for the service.\
+In addition, there are APIs to upgrade multiple instances, all instances of a
+component, and all instances of multiple components.
+
+3. Finalize upgrade.\
+This step involves finalization of upgrade. With an explicit step to finalize 
the
+upgrade, users have a chance to cancel the current upgrade in progress. When
+the user chooses to cancel, the service will make the best effort to revert to
+the previous version.\
+\
+When the upgrade is finalized, the old service definition is
+overwritten by the new service definition and the service state changes to 
`STABLE`.\
+A service can be auto-finalized when the upgrade is initialized with
+`-autoFinalize` option. With auto-finalization, when all the 
component-instances of
+the service have been upgraded, finalization will be performed automatically 
by the
+service framework.\
+\
+**NOTE**: Cancel of upgrade is not implemented yet.
+
+## Upgrade Example
+This example shows upgrade of sleeper service. Below is the sleeper service
+definition
+
+```
+{
+  "name": "sleeper-service",
+  "components" :
+[
+  {
+"name": "sleeper",
+"version": "1.0.0",
+"number_of_containers": 1,
+"launch_command": "sleep 90",
+"resource": {
+  "cpus": 1,
+  "memory": "256"
+   }
+  }
+]
+}
+```
+Assuming the user launched an instance of sleeper 

[38/50] hadoop git commit: YARN-7748. TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted fails due to multiple container fail events. Contributed by Weiwei Yang.

2018-07-26 Thread eyang
YARN-7748. 
TestContainerResizing.testIncreaseContainerUnreservedWhenApplicationCompleted 
fails due to multiple container fail events. Contributed by Weiwei Yang.

(cherry picked from commit 35ce6eb1f526ce3db7e015fb1761eee15604100c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4488fd82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4488fd82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4488fd82

Branch: refs/remotes/origin/branch-3.1
Commit: 4488fd8295011b37f683c964ae2012fe1b6a4044
Parents: a684a2e
Author: Sunil G 
Authored: Tue Jul 24 22:20:06 2018 +0530
Committer: Sunil G 
Committed: Tue Jul 24 22:21:15 2018 +0530

--
 .../scheduler/capacity/TestContainerResizing.java | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4488fd82/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
index eacbf6e..307d5ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent;
@@ -58,7 +59,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica
 .FiCaSchedulerNode;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -740,11 +740,14 @@ public class TestContainerResizing {
   @Test
   public void testIncreaseContainerUnreservedWhenApplicationCompleted()
   throws Exception {
+// Disable relaunch app attempt on failure, in order to check
+// resource usages for current app only.
+conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
 /**
  * Similar to testIncreaseContainerUnreservedWhenContainerCompleted, when
  * application finishes, reserved increase container should be cancelled
  */
-MockRM rm1 = new MockRM() {
+MockRM rm1 = new MockRM(conf) {
   @Override
   public RMNodeLabelsManager createNodeLabelManager() {
 return mgr;
@@ -807,9 +810,14 @@ public class TestContainerResizing {
 Assert.assertEquals(6 * GB,
 app.getAppAttemptResourceUsage().getReserved().getMemorySize());
 
-// Kill the application
-cs.handle(new 
AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(),
-RMAppAttemptState.KILLED, false));
+// Kill the application by killing the AM container
+ContainerId amContainer =
+ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
+cs.killContainer(cs.getRMContainer(amContainer));
+rm1.waitForState(am1.getApplicationAttemptId(),
+RMAppAttemptState.FAILED);
+rm1.waitForState(am1.getApplicationAttemptId().getApplicationId(),
+RMAppState.FAILED);
 
 /* Check statuses after reservation satisfied */
 // Increase request should be unreserved



[15/50] hadoop git commit: HDFS-13475. RBF: Admin cannot enforce Router enter SafeMode. Contributed by Chao Sun.

2018-07-26 Thread eyang
HDFS-13475. RBF: Admin cannot enforce Router enter SafeMode. Contributed by 
Chao Sun.

(cherry picked from commit 359ea4e18147af5677c6d88265e26de6b6c72999)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4898edf4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4898edf4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4898edf4

Branch: refs/remotes/origin/branch-3.1
Commit: 4898edf4f7fc83ab48cc2ed20bfe66ca0804699a
Parents: 9a79e89
Author: Inigo Goiri 
Authored: Mon Jul 16 09:46:21 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Jul 16 09:47:00 2018 -0700

--
 .../hdfs/server/federation/router/Router.java   |  7 +++
 .../federation/router/RouterAdminServer.java| 32 ---
 .../federation/router/RouterRpcServer.java  | 26 +
 .../router/RouterSafemodeService.java   | 44 ---
 .../federation/router/TestRouterAdminCLI.java   |  7 ++-
 .../federation/router/TestRouterSafemode.java   | 58 
 6 files changed, 121 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4898edf4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
index df2a448..7e67daa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
@@ -665,4 +665,11 @@ public class Router extends CompositeService {
   Collection getNamenodeHearbeatServices() {
 return this.namenodeHeartbeatServices;
   }
+
+  /**
+   * Get the Router safe mode service
+   */
+  RouterSafemodeService getSafemodeService() {
+return this.safemodeService;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4898edf4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
index 139dfb8..8e23eca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Set;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -272,23 +273,37 @@ public class RouterAdminServer extends AbstractService
   @Override
   public EnterSafeModeResponse enterSafeMode(EnterSafeModeRequest request)
   throws IOException {
-this.router.updateRouterState(RouterServiceState.SAFEMODE);
-this.router.getRpcServer().setSafeMode(true);
-return EnterSafeModeResponse.newInstance(verifySafeMode(true));
+boolean success = false;
+RouterSafemodeService safeModeService = this.router.getSafemodeService();
+if (safeModeService != null) {
+  this.router.updateRouterState(RouterServiceState.SAFEMODE);
+  safeModeService.setManualSafeMode(true);
+  success = verifySafeMode(true);
+}
+return EnterSafeModeResponse.newInstance(success);
   }
 
   @Override
   public LeaveSafeModeResponse leaveSafeMode(LeaveSafeModeRequest request)
   throws IOException {
-this.router.updateRouterState(RouterServiceState.RUNNING);
-this.router.getRpcServer().setSafeMode(false);
-return LeaveSafeModeResponse.newInstance(verifySafeMode(false));
+boolean success = false;
+RouterSafemodeService safeModeService = this.router.getSafemodeService();
+if (safeModeService != null) {
+  this.router.updateRouterState(RouterServiceState.RUNNING);
+  safeModeService.setManualSafeMode(false);
+  success = verifySafeMode(false);
+}
+return LeaveSafeModeResponse.newInstance(success);
   }
 
   @Override
   public GetSafeModeResponse getSafeMode(GetSafeModeRequest request)
   throws IOException {
-boolean isInSafeMode = this.router.getRpcServer().isInSafeMode();
+ 

[29/50] hadoop git commit: MAPREDUCE-7118. Distributed cache conflicts break backward compatibility. (Jason Lowe via wangda)

2018-07-26 Thread eyang
MAPREDUCE-7118. Distributed cache conflicts break backward compatibility. 
(Jason Lowe via wangda)

Change-Id: I89ab4852b4ad305fec19812e8931c59d96581376
(cherry picked from commit b3b4d4ccb53fdf8dacc66e912822b34f8b3bf215)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23624c92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23624c92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23624c92

Branch: refs/remotes/origin/branch-3.1
Commit: 23624c9248b01794cae88f299bf97735ed09b6ce
Parents: 749fff5
Author: Wangda Tan 
Authored: Thu Jul 19 12:03:24 2018 -0700
Committer: Wangda Tan 
Committed: Thu Jul 19 14:26:40 2018 -0700

--
 .../mapreduce/v2/util/LocalResourceBuilder.java |  8 +++-
 .../hadoop/mapreduce/v2/util/TestMRApps.java| 20 ++--
 2 files changed, 21 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23624c92/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
index 48b157e..48cc29e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/LocalResourceBuilder.java
@@ -27,7 +27,6 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.InvalidJobConfException;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.yarn.api.records.LocalResource;
@@ -144,10 +143,9 @@ class LocalResourceBuilder {
 
 LocalResource orig = localResources.get(linkName);
 if(orig != null && !orig.getResource().equals(URL.fromURI(p.toUri( 
{
-  throw new InvalidJobConfException(
-  getResourceDescription(orig.getType()) + orig.getResource()
-  +
-  " conflicts with " + getResourceDescription(type) + u);
+  LOG.warn(getResourceDescription(orig.getType()) + orig.getResource()
+  + " conflicts with " + getResourceDescription(type) + u);
+  continue;
 }
 Boolean sharedCachePolicy = 
sharedCacheUploadPolicies.get(u.toString());
 sharedCachePolicy =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23624c92/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 3aadd63..c6a2874 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -360,7 +360,7 @@ public class TestMRApps {
   }
   
   @SuppressWarnings("deprecation")
-  @Test(timeout = 12, expected = InvalidJobConfException.class)
+  @Test(timeout = 12)
   public void testSetupDistributedCacheConflicts() throws Exception {
 Configuration conf = new Configuration();
 conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
@@ -388,10 +388,18 @@ public class TestMRApps {
 Map localResources = 
   new HashMap();
 MRApps.setupDistributedCache(conf, localResources);
+
+assertEquals(1, localResources.size());
+LocalResource lr = localResources.get("something");
+//Archive wins
+assertNotNull(lr);
+assertEquals(10l, lr.getSize());
+assertEquals(10l, lr.getTimestamp());
+assertEquals(LocalResourceType.ARCHIVE, lr.getType());
   }
   
   @SuppressWarnings("deprecation")
-  @Test(timeout = 12, 

[30/50] hadoop git commit: HDFS-13743. RBF: Router throws NullPointerException due to the invalid initialization of MountTableResolver. Contributed by Takanobu Asanuma.

2018-07-26 Thread eyang
HDFS-13743. RBF: Router throws NullPointerException due to the invalid 
initialization of MountTableResolver. Contributed by Takanobu Asanuma.

(cherry picked from commit 7b25fb949bf6f02df997beeca7df46c9e84c8d96)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/823d576a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/823d576a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/823d576a

Branch: refs/remotes/origin/branch-3.1
Commit: 823d576a66e51c4eb93b18d28a378505cdd34561
Parents: 23624c9
Author: Yiqun Lin 
Authored: Fri Jul 20 17:28:57 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Jul 20 17:31:13 2018 +0800

--
 .../federation/resolver/MountTableResolver.java | 28 +--
 .../TestInitializeMountTableResolver.java   | 82 
 2 files changed, 102 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/823d576a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 3f6efd6..c264de3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
@@ -42,7 +44,6 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -149,14 +150,25 @@ public class MountTableResolver
* @param conf Configuration for this resolver.
*/
   private void initDefaultNameService(Configuration conf) {
-try {
-  this.defaultNameService = conf.get(
-  DFS_ROUTER_DEFAULT_NAMESERVICE,
-  DFSUtil.getNamenodeNameServiceId(conf));
-} catch (HadoopIllegalArgumentException e) {
-  LOG.error("Cannot find default name service, setting it to the first");
+this.defaultNameService = conf.get(
+DFS_ROUTER_DEFAULT_NAMESERVICE,
+DFSUtil.getNamenodeNameServiceId(conf));
+
+if (defaultNameService == null) {
+  LOG.warn(
+  "{} and {} is not set. Fallback to {} as the default name service.",
+  DFS_ROUTER_DEFAULT_NAMESERVICE, DFS_NAMESERVICE_ID, 
DFS_NAMESERVICES);
   Collection nsIds = DFSUtilClient.getNameServiceIds(conf);
-  this.defaultNameService = nsIds.iterator().next();
+  if (nsIds.isEmpty()) {
+this.defaultNameService = "";
+  } else {
+this.defaultNameService = nsIds.iterator().next();
+  }
+}
+
+if (this.defaultNameService.equals("")) {
+  LOG.warn("Default name service is not set.");
+} else {
   LOG.info("Default name service: {}", this.defaultNameService);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/823d576a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
new file mode 100644
index 000..5db7531
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license 

hadoop git commit: YARN-8429. Improve diagnostic message when artifact is not set properly. Contributed by Gour Saha

2018-07-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 77721f39e -> 8d3c068e5


YARN-8429. Improve diagnostic message when artifact is not set properly.
   Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d3c068e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d3c068e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d3c068e

Branch: refs/heads/trunk
Commit: 8d3c068e59f18e3f8260713fee83c458aa1d
Parents: 77721f3
Author: Eric Yang 
Authored: Thu Jul 26 20:02:13 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 20:02:13 2018 -0400

--
 .../exceptions/RestApiErrorMessages.java|  6 +-
 .../provider/AbstractClientProvider.java| 14 ++---
 .../defaultImpl/DefaultClientProvider.java  | 22 ---
 .../provider/docker/DockerClientProvider.java   | 15 ++---
 .../provider/tarball/TarballClientProvider.java | 27 
 .../yarn/service/utils/ServiceApiUtil.java  |  4 +-
 .../hadoop/yarn/service/TestServiceApiUtil.java |  9 ++-
 .../providers/TestAbstractClientProvider.java   | 29 -
 .../providers/TestDefaultClientProvider.java| 66 
 9 files changed, 138 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3c068e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
index 5b3c72c..f10d884 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/exceptions/RestApiErrorMessages.java
@@ -50,6 +50,10 @@ public interface RestApiErrorMessages {
   "Artifact id (like docker image name) is either empty or not provided";
   String ERROR_ARTIFACT_ID_FOR_COMP_INVALID =
   ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT;
+  String ERROR_ARTIFACT_PATH_FOR_COMP_INVALID = "For component %s with %s "
+  + "artifact, path does not exist: %s";
+  String ERROR_CONFIGFILE_DEST_FILE_FOR_COMP_NOT_ABSOLUTE = "For component %s "
+  + "with %s artifact, dest_file must be a relative path: %s";
 
   String ERROR_RESOURCE_INVALID = "Resource is not provided";
   String ERROR_RESOURCE_FOR_COMP_INVALID =
@@ -89,7 +93,7 @@ public interface RestApiErrorMessages {
   String ERROR_ABSENT_NUM_OF_INSTANCE =
   "Num of instances should appear either globally or per component";
   String ERROR_ABSENT_LAUNCH_COMMAND =
-  "Launch_command is required when type is not DOCKER";
+  "launch_command is required when type is not DOCKER";
 
   String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at"
   + " component level, needs corresponding values set at service level";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3c068e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
index 672c435..ae79619 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractClientProvider.java
@@ -68,18 +68,18 @@ public abstract class AbstractClientProvider {
* Validate the artifact.
* @param artifact
*/
-  public abstract void validateArtifact(Artifact artifact, FileSystem
-  

hadoop git commit: HADOOP-15593. Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds. Contributed by Gabor Bota

2018-07-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 177f6045a -> a869bd970


HADOOP-15593.  Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds.
   Contributed by Gabor Bota

(cherry picked from commit 77721f39e26b630352a1f4087524a3fbd21ff06e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a869bd97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a869bd97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a869bd97

Branch: refs/heads/branch-3.1
Commit: a869bd970e832c4d770b3cee6257225260f4d235
Parents: 177f604
Author: Eric Yang 
Authored: Thu Jul 26 18:35:36 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:47:58 2018 -0400

--
 .../hadoop/security/UserGroupInformation.java   | 179 ---
 .../security/TestUserGroupInformation.java  |  38 
 2 files changed, 148 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a869bd97/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 33a876f..c44ef72 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -40,6 +40,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -850,81 +851,121 @@ public class UserGroupInformation {
 }
 
 //spawn thread only if we have kerb credentials
-Thread t = new Thread(new Runnable() {
+KerberosTicket tgt = getTGT();
+if (tgt == null) {
+  return;
+}
+String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+long nextRefresh = getRefreshTime(tgt);
+Thread t =
+new Thread(new AutoRenewalForUserCredsRunnable(tgt, cmd, nextRefresh));
+t.setDaemon(true);
+t.setName("TGT Renewer for " + getUserName());
+t.start();
+  }
+
+  @VisibleForTesting
+  class AutoRenewalForUserCredsRunnable implements Runnable {
+private KerberosTicket tgt;
+private RetryPolicy rp;
+private String kinitCmd;
+private long nextRefresh;
+private boolean runRenewalLoop = true;
+
+AutoRenewalForUserCredsRunnable(KerberosTicket tgt, String kinitCmd,
+long nextRefresh){
+  this.tgt = tgt;
+  this.kinitCmd = kinitCmd;
+  this.nextRefresh = nextRefresh;
+  this.rp = null;
+}
+
+public void setRunRenewalLoop(boolean runRenewalLoop) {
+  this.runRenewalLoop = runRenewalLoop;
+}
 
-  @Override
-  public void run() {
-String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
-KerberosTicket tgt = getTGT();
-if (tgt == null) {
+@Override
+public void run() {
+  do {
+try {
+  long now = Time.now();
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Current time is " + now);
+LOG.debug("Next refresh is " + nextRefresh);
+  }
+  if (now < nextRefresh) {
+Thread.sleep(nextRefresh - now);
+  }
+  String output = Shell.execCommand(kinitCmd, "-R");
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Renewed ticket. kinit output: {}", output);
+  }
+  reloginFromTicketCache();
+  tgt = getTGT();
+  if (tgt == null) {
+LOG.warn("No TGT after renewal. Aborting renew thread for " +
+getUserName());
+return;
+  }
+  nextRefresh = Math.max(getRefreshTime(tgt),
+  now + kerberosMinSecondsBeforeRelogin);
+  metrics.renewalFailures.set(0);
+  rp = null;
+} catch (InterruptedException ie) {
+  LOG.warn("Terminating renewal thread");
   return;
-}
-long nextRefresh = getRefreshTime(tgt);
-RetryPolicy rp = null;
-while (true) {
+} catch (IOException ie) {
+  metrics.renewalFailuresTotal.incr();
+  final long now = Time.now();
+
+  if (tgt.isDestroyed()) {
+LOG.error("TGT is destroyed. Aborting renew thread for {}.",
+getUserName());
+return;
+  }
+
+  long tgtEndTime;
+  // As described in HADOOP-15593 we need to handle the case when
+  // tgt.getEndTime() throws 

hadoop git commit: HADOOP-15593. Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds. Contributed by Gabor Bota

2018-07-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 40fad3282 -> 77721f39e


HADOOP-15593.  Fixed NPE in UGI spawnAutoRenewalThreadForUserCreds.
   Contributed by Gabor Bota


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77721f39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77721f39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77721f39

Branch: refs/heads/trunk
Commit: 77721f39e26b630352a1f4087524a3fbd21ff06e
Parents: 40fad32
Author: Eric Yang 
Authored: Thu Jul 26 18:35:36 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:35:36 2018 -0400

--
 .../hadoop/security/UserGroupInformation.java   | 179 ---
 .../security/TestUserGroupInformation.java  |  38 
 2 files changed, 148 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77721f39/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 29b9fea..6ce72edb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -40,6 +40,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Date;
 import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -851,81 +852,121 @@ public class UserGroupInformation {
 }
 
 //spawn thread only if we have kerb credentials
-Thread t = new Thread(new Runnable() {
+KerberosTicket tgt = getTGT();
+if (tgt == null) {
+  return;
+}
+String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
+long nextRefresh = getRefreshTime(tgt);
+Thread t =
+new Thread(new AutoRenewalForUserCredsRunnable(tgt, cmd, nextRefresh));
+t.setDaemon(true);
+t.setName("TGT Renewer for " + getUserName());
+t.start();
+  }
+
+  @VisibleForTesting
+  class AutoRenewalForUserCredsRunnable implements Runnable {
+private KerberosTicket tgt;
+private RetryPolicy rp;
+private String kinitCmd;
+private long nextRefresh;
+private boolean runRenewalLoop = true;
+
+AutoRenewalForUserCredsRunnable(KerberosTicket tgt, String kinitCmd,
+long nextRefresh){
+  this.tgt = tgt;
+  this.kinitCmd = kinitCmd;
+  this.nextRefresh = nextRefresh;
+  this.rp = null;
+}
+
+public void setRunRenewalLoop(boolean runRenewalLoop) {
+  this.runRenewalLoop = runRenewalLoop;
+}
 
-  @Override
-  public void run() {
-String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
-KerberosTicket tgt = getTGT();
-if (tgt == null) {
+@Override
+public void run() {
+  do {
+try {
+  long now = Time.now();
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Current time is " + now);
+LOG.debug("Next refresh is " + nextRefresh);
+  }
+  if (now < nextRefresh) {
+Thread.sleep(nextRefresh - now);
+  }
+  String output = Shell.execCommand(kinitCmd, "-R");
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Renewed ticket. kinit output: {}", output);
+  }
+  reloginFromTicketCache();
+  tgt = getTGT();
+  if (tgt == null) {
+LOG.warn("No TGT after renewal. Aborting renew thread for " +
+getUserName());
+return;
+  }
+  nextRefresh = Math.max(getRefreshTime(tgt),
+  now + kerberosMinSecondsBeforeRelogin);
+  metrics.renewalFailures.set(0);
+  rp = null;
+} catch (InterruptedException ie) {
+  LOG.warn("Terminating renewal thread");
   return;
-}
-long nextRefresh = getRefreshTime(tgt);
-RetryPolicy rp = null;
-while (true) {
+} catch (IOException ie) {
+  metrics.renewalFailuresTotal.incr();
+  final long now = Time.now();
+
+  if (tgt.isDestroyed()) {
+LOG.error("TGT is destroyed. Aborting renew thread for {}.",
+getUserName());
+return;
+  }
+
+  long tgtEndTime;
+  // As described in HADOOP-15593 we need to handle the case when
+  // tgt.getEndTime() throws NPE because of JDK issue JDK-8147772
+  // NPE is only possible if this 

hadoop git commit: YARN-8545. Return allocated resource to RM for failed container. Contributed by Chandni Singh

2018-07-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 8e3807afe -> 177f6045a


YARN-8545.  Return allocated resource to RM for failed container.
Contributed by Chandni Singh

(cherry picked from commit 40fad32824d2f8f960c779d78357e62103453da0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/177f6045
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/177f6045
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/177f6045

Branch: refs/heads/branch-3.1
Commit: 177f6045ac4ae6e2dbae2e04da8c9cebb5da8748
Parents: 8e3807a
Author: Eric Yang 
Authored: Thu Jul 26 18:22:57 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:25:41 2018 -0400

--
 .../hadoop/yarn/service/ServiceScheduler.java   |  3 +-
 .../yarn/service/component/Component.java   | 42 +++-
 .../component/instance/ComponentInstance.java   | 21 +++---
 .../instance/ComponentInstanceEvent.java|  2 +
 .../containerlaunch/ContainerLaunchService.java | 12 --
 .../hadoop/yarn/service/MockServiceAM.java  | 34 +++-
 .../hadoop/yarn/service/TestServiceAM.java  | 35 
 .../yarn/service/component/TestComponent.java   |  3 +-
 .../instance/TestComponentInstance.java | 26 ++--
 9 files changed, 135 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/177f6045/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index d3e8e4f..cfaf356 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -687,7 +687,8 @@ public class ServiceScheduler extends CompositeService {
 }
 ComponentEvent event =
 new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED)
-.setStatus(status).setInstance(instance);
+.setStatus(status).setInstance(instance)
+.setContainerId(containerId);
 dispatcher.getEventHandler().handle(event);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/177f6045/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a1ee796..aaa23da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service.component;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -518,10 +519,10 @@ public class Component implements 
EventHandler {
   private static class ContainerCompletedTransition extends BaseTransition {
 @Override
 public void transition(Component component, ComponentEvent event) {
-
+  Preconditions.checkNotNull(event.getContainerId());
   component.updateMetrics(event.getStatus());
   component.dispatcher.getEventHandler().handle(
-  new ComponentInstanceEvent(event.getStatus().getContainerId(), STOP)
+  new ComponentInstanceEvent(event.getContainerId(), STOP)
   

hadoop git commit: YARN-8545. Return allocated resource to RM for failed container. Contributed by Chandni Singh

2018-07-26 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk d70d84570 -> 40fad3282


YARN-8545.  Return allocated resource to RM for failed container.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40fad328
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40fad328
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40fad328

Branch: refs/heads/trunk
Commit: 40fad32824d2f8f960c779d78357e62103453da0
Parents: d70d845
Author: Eric Yang 
Authored: Thu Jul 26 18:22:57 2018 -0400
Committer: Eric Yang 
Committed: Thu Jul 26 18:22:57 2018 -0400

--
 .../hadoop/yarn/service/ServiceScheduler.java   |  3 +-
 .../yarn/service/component/Component.java   | 42 +++-
 .../component/instance/ComponentInstance.java   | 21 +++---
 .../instance/ComponentInstanceEvent.java|  2 +
 .../containerlaunch/ContainerLaunchService.java | 12 --
 .../hadoop/yarn/service/MockServiceAM.java  | 34 +++-
 .../hadoop/yarn/service/TestServiceAM.java  | 35 
 .../yarn/service/component/TestComponent.java   |  3 +-
 .../instance/TestComponentInstance.java | 26 ++--
 9 files changed, 135 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40fad328/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index d3e8e4f..cfaf356 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -687,7 +687,8 @@ public class ServiceScheduler extends CompositeService {
 }
 ComponentEvent event =
 new ComponentEvent(instance.getCompName(), CONTAINER_COMPLETED)
-.setStatus(status).setInstance(instance);
+.setStatus(status).setInstance(instance)
+.setContainerId(containerId);
 dispatcher.getEventHandler().handle(event);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40fad328/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
index a1ee796..aaa23da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service.component;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -518,10 +519,10 @@ public class Component implements 
EventHandler {
   private static class ContainerCompletedTransition extends BaseTransition {
 @Override
 public void transition(Component component, ComponentEvent event) {
-
+  Preconditions.checkNotNull(event.getContainerId());
   component.updateMetrics(event.getStatus());
   component.dispatcher.getEventHandler().handle(
-  new ComponentInstanceEvent(event.getStatus().getContainerId(), STOP)
+  new ComponentInstanceEvent(event.getContainerId(), STOP)
   .setStatus(event.getStatus()));
 
   ComponentRestartPolicy restartPolicy =
@@ -784,28 

hadoop git commit: HDDS-291. Initialize hadoop metrics system in standalone hdds datanodes. Contributed by Elek Marton.

2018-07-26 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk fd31cb6cf -> d70d84570


HDDS-291. Initialize hadoop metrics system in standalone hdds datanodes. 
Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d70d8457
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d70d8457
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d70d8457

Branch: refs/heads/trunk
Commit: d70d84570575574b7e3ad0f00baf54f1dde76d97
Parents: fd31cb6
Author: Xiaoyu Yao 
Authored: Thu Jul 26 13:17:37 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jul 26 13:17:37 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java | 2 ++
 .../ozone/container/common/statemachine/SCMConnectionManager.java  | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70d8457/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index ddeec87..f359e72 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine
 .DatanodeStateMachine;
@@ -241,6 +242,7 @@ public class HddsDatanodeService implements ServicePlugin {
 System.exit(1);
   }
   StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
+  DefaultMetricsSystem.initialize("HddsDatanode");
   HddsDatanodeService hddsDatanodeService =
   createHddsDatanodeService(conf);
   hddsDatanodeService.start(null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70d8457/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 19722f0..85fb580 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -67,7 +67,7 @@ public class SCMConnectionManager
 this.rpcTimeout = timeOut.intValue();
 this.scmMachines = new HashMap<>();
 this.conf = conf;
-jmxBean = MBeans.register("OzoneDataNode",
+jmxBean = MBeans.register("HddsDatanode",
 "SCMConnectionManager",
 this);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-277. PipelineStateMachine should handle closure of pipelines in SCM. Contributed by Mukul Kumar Singh.

2018-07-26 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk be150a17b -> fd31cb6cf


HDDS-277. PipelineStateMachine should handle closure of pipelines in SCM. 
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd31cb6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd31cb6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd31cb6c

Branch: refs/heads/trunk
Commit: fd31cb6cfeef0c7e9bb0a054cb0f78853df8976f
Parents: be150a1
Author: Xiaoyu Yao 
Authored: Thu Jul 26 13:15:27 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jul 26 13:15:55 2018 -0700

--
 .../container/common/helpers/ContainerInfo.java |   7 +-
 .../container/CloseContainerEventHandler.java   |  28 ++--
 .../hdds/scm/container/ContainerMapping.java|  16 +-
 .../scm/container/ContainerStateManager.java|  11 ++
 .../scm/container/states/ContainerStateMap.java |   2 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java|  33 ++--
 .../hdds/scm/pipelines/PipelineManager.java |  31 ++--
 .../hdds/scm/pipelines/PipelineSelector.java|  70 +++--
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  14 +-
 .../standalone/StandaloneManagerImpl.java   |  13 +-
 .../scm/server/StorageContainerManager.java |   2 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   4 +-
 .../TestCloseContainerEventHandler.java |  13 +-
 .../scm/container/TestContainerMapping.java |   4 +-
 .../container/closer/TestContainerCloser.java   |   4 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   3 +-
 .../hdds/scm/pipeline/TestPipelineClose.java| 152 +++
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   4 +-
 18 files changed, 331 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd31cb6c/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 4074b21..b194c14 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -459,12 +459,13 @@ public class ContainerInfo implements 
Comparator,
 
   /**
* Check if a container is in open state, this will check if the
-   * container is either open or allocated or creating. Any containers in
-   * these states is managed as an open container by SCM.
+   * container is either open, allocated, creating or closing.
+   * Any containers in these states is managed as an open container by SCM.
*/
   public boolean isContainerOpen() {
 return state == HddsProtos.LifeCycleState.ALLOCATED ||
 state == HddsProtos.LifeCycleState.CREATING ||
-state == HddsProtos.LifeCycleState.OPEN;
+state == HddsProtos.LifeCycleState.OPEN ||
+state == HddsProtos.LifeCycleState.CLOSING;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd31cb6c/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 859e5d5..949eb13 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
@@ -63,13 +62,13 @@ public class CloseContainerEventHandler implements 
EventHandler {
   containerManager.getContainerWithPipeline(containerID.getId());
   info = containerWithPipeline.getContainerInfo();
   if (info == null) {
-LOG.info("Failed to update the container state. Container with id : 

hadoop git commit: HDFS-13622. mkdir should print the parent directory in the error message when parent directories do not exist. Contributed by Shweta.

2018-07-26 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk a19229594 -> be150a17b


HDFS-13622. mkdir should print the parent directory in the error message when 
parent directories do not exist. Contributed by Shweta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be150a17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be150a17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be150a17

Branch: refs/heads/trunk
Commit: be150a17b15d15f5de6d4839d5e805e8d6c57850
Parents: a192295
Author: Xiao Chen 
Authored: Thu Jul 26 10:23:30 2018 -0700
Committer: Xiao Chen 
Committed: Thu Jul 26 10:24:32 2018 -0700

--
 .../main/java/org/apache/hadoop/fs/shell/Mkdir.java| 13 -
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java |  8 
 .../hadoop-hdfs/src/test/resources/testHDFSConf.xml|  4 ++--
 3 files changed, 18 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be150a17/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
index 9f39da2..5828b0b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
@@ -68,11 +68,14 @@ class Mkdir extends FsCommand {
 
   @Override
   protected void processNonexistentPath(PathData item) throws IOException {
-// check if parent exists. this is complicated because getParent(a/b/c/) 
returns a/b/c, but
-// we want a/b
-if (!createParents &&
-!item.fs.exists(new Path(item.path.toString()).getParent())) {
-  throw new PathNotFoundException(item.toString());
+if (!createParents) {
+  // check if parent exists. this is complicated because getParent(a/b/c/) 
returns a/b/c, but
+  // we want a/b
+  final Path itemPath = new Path(item.path.toString());
+  final Path itemParentPath = itemPath.getParent();
+  if (!item.fs.exists(itemParentPath)) {
+throw new PathNotFoundException(itemParentPath.toString());
+  }
 }
 if (!item.fs.mkdirs(item.path)) {
   throw new PathIOException(item.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be150a17/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index b19bdea..1d2042e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -721,6 +721,14 @@ public class TestDFSShell {
   assertTrue(" -mkdir returned this is a file ",
   (returned.lastIndexOf("not a directory") != -1));
   out.reset();
+  argv[0] = "-mkdir";
+  argv[1] = "/testParent/testChild";
+  ret = ToolRunner.run(shell, argv);
+  returned = out.toString();
+  assertEquals(" -mkdir returned 1", 1, ret);
+  assertTrue(" -mkdir returned there is No file or directory but has 
testChild in the path",
+  (returned.lastIndexOf("testChild") == -1));
+  out.reset();
   argv = new String[3];
   argv[0] = "-mv";
   argv[1] = "/testfile";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be150a17/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index a13c441..4ab093b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -6183,11 +6183,11 @@
   
 
   RegexpComparator
-  mkdir: `dir0/dir1': No such file or 
directory
+  .*mkdir:.*dir0': No such file or 
directory$
 
   
 
-
+
  
   mkdir: Test recreate of existing directory 
fails
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-201. Add name for LeaseManager. Contributed by Sandeep Nemuri.

2018-07-26 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9089790ca -> a19229594


HDDS-201. Add name for LeaseManager. Contributed by Sandeep Nemuri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1922959
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1922959
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1922959

Branch: refs/heads/trunk
Commit: a19229594e48fad9f50dbdb1f0b2fcbf7443ce66
Parents: 9089790
Author: Nanda kumar 
Authored: Thu Jul 26 19:00:23 2018 +0530
Committer: Nanda kumar 
Committed: Thu Jul 26 19:00:23 2018 +0530

--
 .../apache/hadoop/ozone/lease/LeaseManager.java | 14 -
 .../hadoop/ozone/lease/TestLeaseManager.java| 21 ++--
 .../hdds/server/events/TestEventWatcher.java|  2 +-
 .../hdds/scm/container/ContainerMapping.java|  4 ++--
 .../hdds/scm/pipelines/PipelineSelector.java|  4 ++--
 .../scm/server/StorageContainerManager.java |  3 ++-
 .../replication/TestReplicationManager.java |  4 ++--
 7 files changed, 28 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1922959/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
index b8390dd..756a41a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java
@@ -42,6 +42,7 @@ public class LeaseManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(LeaseManager.class);
 
+  private final String name;
   private final long defaultTimeout;
   private Map> activeLeases;
   private LeaseMonitor leaseMonitor;
@@ -51,10 +52,13 @@ public class LeaseManager {
   /**
* Creates an instance of lease manager.
*
+   * @param name
+   *Name for the LeaseManager instance.
* @param defaultTimeout
*Default timeout in milliseconds to be used for lease creation.
*/
-  public LeaseManager(long defaultTimeout) {
+  public LeaseManager(String name, long defaultTimeout) {
+this.name = name;
 this.defaultTimeout = defaultTimeout;
   }
 
@@ -62,11 +66,11 @@ public class LeaseManager {
* Starts the lease manager service.
*/
   public void start() {
-LOG.debug("Starting LeaseManager service");
+LOG.debug("Starting {} LeaseManager service", name);
 activeLeases = new ConcurrentHashMap<>();
 leaseMonitor = new LeaseMonitor();
 leaseMonitorThread = new Thread(leaseMonitor);
-leaseMonitorThread.setName("LeaseManager#LeaseMonitor");
+leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor");
 leaseMonitorThread.setDaemon(true);
 leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> {
   // Let us just restart this thread after logging an error.
@@ -75,7 +79,7 @@ public class LeaseManager {
   thread.toString(), throwable);
   leaseMonitorThread.start();
 });
-LOG.debug("Starting LeaseManager#LeaseMonitor Thread");
+LOG.debug("Starting {}-LeaseManager#LeaseMonitor Thread", name);
 leaseMonitorThread.start();
 isRunning = true;
   }
@@ -203,7 +207,7 @@ public class LeaseManager {
 @Override
 public void run() {
   while(monitor) {
-LOG.debug("LeaseMonitor: checking for lease expiry");
+LOG.debug("{}-LeaseMonitor: checking for lease expiry", name);
 long sleepTime = Long.MAX_VALUE;
 
 for (T resource : activeLeases.keySet()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1922959/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
--
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
index 517c1a7..bdc70fc 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java
@@ -67,7 +67,7 @@ public class TestLeaseManager {
   public void testLeaseAcquireAndRelease() throws LeaseException {
 //It is assumed that the test case execution won't take more than 5 
seconds,
 //if it takes more time increase the defaultTimeout value of LeaseManager.
-LeaseManager&lt;DummyResource&gt; manager = new LeaseManager&lt;&gt;(5000);
+LeaseManager&lt;DummyResource&gt; manager = new LeaseManager&lt;&gt;("Test", 5000);
 manager.start();
 

hadoop git commit: HADOOP-15611. Log more details for FairCallQueue. Contributed by Ryan Wu.

2018-07-26 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk f93ecf5c1 -> 9089790ca


HADOOP-15611. Log more details for FairCallQueue. Contributed by Ryan Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9089790c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9089790c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9089790c

Branch: refs/heads/trunk
Commit: 9089790cabb4771198be0fe64c1317a3ff1c80f1
Parents: f93ecf5
Author: Yiqun Lin 
Authored: Thu Jul 26 18:08:28 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Jul 26 18:08:28 2018 +0800

--
 .../main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java   | 8 
 .../org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java | 3 +++
 2 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9089790c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index f12ecb6..8bb0ce4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -391,6 +391,7 @@ public class DecayRpcScheduler implements RpcScheduler,
* counts current.
*/
   private void decayCurrentCounts() {
+LOG.debug("Start to decay current counts.");
 try {
   long totalDecayedCount = 0;
   long totalRawCount = 0;
@@ -410,7 +411,12 @@ public class DecayRpcScheduler implements RpcScheduler,
 totalDecayedCount += nextValue;
 decayedCount.set(nextValue);
 
+LOG.debug("Decaying counts for the user: {}, " +
+"its decayedCount: {}, rawCount: {}", entry.getKey(),
+nextValue, rawCount.get());
 if (nextValue == 0) {
+  LOG.debug("The decayed count for the user {} is zero " +
+  "and being cleaned.", entry.getKey());
   // We will clean up unused keys here. An interesting optimization
   // might be to have an upper bound on keyspace in callCounts and only
   // clean once we pass it.
@@ -422,6 +428,8 @@ public class DecayRpcScheduler implements RpcScheduler,
   totalDecayedCallCount.set(totalDecayedCount);
   totalRawCallCount.set(totalRawCount);
 
+  LOG.debug("After decaying the stored counts, totalDecayedCount: {}, " +
+  "totalRawCallCount: {}.", totalDecayedCount, totalRawCount);
   // Now refresh the cache of scheduling decisions
   recomputeScheduleCache();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9089790c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
index d308725..096cc1a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
@@ -109,6 +109,9 @@ public class WeightedRoundRobinMultiplexer implements 
RpcMultiplexer {
 // Finally, reset requestsLeft. This will enable moveToNextQueue to be
 // called again, for the new currentQueueIndex
 this.requestsLeft.set(this.queueWeights[nextIdx]);
+LOG.debug("Moving to next queue from queue index {} to index {}, " +
+"number of requests left for current queue: {}.",
+thisIdx, nextIdx, requestsLeft);
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org