hadoop git commit: HDDS-380. Remove synchronization from ChunkGroupOutputStream and ChunkOutputStream. Contributed by Shashikant Banerjee.

2018-08-29 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fa463942 -> 0bd421719


HDDS-380. Remove synchronization from ChunkGroupOutputStream and 
ChunkOutputStream. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bd42171
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bd42171
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bd42171

Branch: refs/heads/trunk
Commit: 0bd4217194ae50ec30e386b200fcfa54c069f042
Parents: 3fa4639
Author: Nanda kumar 
Authored: Wed Aug 29 13:31:19 2018 +0530
Committer: Nanda kumar 
Committed: Wed Aug 29 13:31:19 2018 +0530

--
 .../hadoop/hdds/scm/storage/ChunkOutputStream.java  | 16 
 .../ozone/client/io/ChunkGroupOutputStream.java | 12 ++--
 2 files changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd42171/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index f2df3fa..8d311d0 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -99,7 +99,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void write(int b) throws IOException {
+  public void write(int b) throws IOException {
 checkOpen();
 int rollbackPosition = buffer.position();
 int rollbackLimit = buffer.limit();
@@ -110,7 +110,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void write(byte[] b, int off, int len)
+  public void write(byte[] b, int off, int len)
   throws IOException {
 if (b == null) {
   throw new NullPointerException();
@@ -137,7 +137,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void flush() throws IOException {
+  public void flush() throws IOException {
 checkOpen();
 if (buffer.position() > 0) {
   int rollbackPosition = buffer.position();
@@ -147,7 +147,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void close() throws IOException {
+  public void close() throws IOException {
 if (xceiverClientManager != null && xceiverClient != null
 && buffer != null) {
   if (buffer.position() > 0) {
@@ -164,7 +164,7 @@ public class ChunkOutputStream extends OutputStream {
 }
   }
 
-  public synchronized void cleanup() {
+  public void cleanup() {
 xceiverClientManager.releaseClient(xceiverClient);
 xceiverClientManager = null;
 xceiverClient = null;
@@ -176,7 +176,7 @@ public class ChunkOutputStream extends OutputStream {
*
* @throws IOException if stream is closed
*/
-  private synchronized void checkOpen() throws IOException {
+  private void checkOpen() throws IOException {
 if (xceiverClient == null) {
   throw new IOException("ChunkOutputStream has been closed.");
 }
@@ -191,7 +191,7 @@ public class ChunkOutputStream extends OutputStream {
* @param rollbackLimit limit to restore in buffer if write fails
* @throws IOException if there is an I/O error while performing the call
*/
-  private synchronized void flushBufferToChunk(int rollbackPosition,
+  private void flushBufferToChunk(int rollbackPosition,
   int rollbackLimit) throws IOException {
 boolean success = false;
 try {
@@ -213,7 +213,7 @@ public class ChunkOutputStream extends OutputStream {
*
* @throws IOException if there is an I/O error while performing the call
*/
-  private synchronized void writeChunkToContainer() throws IOException {
+  private void writeChunkToContainer() throws IOException {
 buffer.flip();
 ByteString data = ByteString.copyFrom(buffer);
 ChunkInfo chunk = ChunkInfo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd42171/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 988af07..00624d5 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/had
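
Note: with the synchronized modifiers removed above, ChunkOutputStream no
longer provides its own mutual exclusion, so a stream shared across threads
would need caller-side coordination. A minimal sketch of such a guard
(hypothetical caller code, not part of this patch):

  import java.io.IOException;
  import java.io.OutputStream;

  /** Hypothetical wrapper: the caller supplies the locking the stream dropped. */
  class GuardedWriter {
    private final Object lock = new Object();
    private final OutputStream out; // e.g. a ChunkOutputStream

    GuardedWriter(OutputStream out) {
      this.out = out;
    }

    void writeRecord(byte[] record) throws IOException {
      synchronized (lock) { // external lock replaces the removed synchronized
        out.write(record, 0, record.length);
      }
    }
  }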

hadoop git commit: HDFS-13634. RBF: Configurable value in xml for async connection request queue size. Contributed by CR Hota.

2018-08-29 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0bd421719 -> a0ebb6b39


HDFS-13634. RBF: Configurable value in xml for async connection request queue 
size. Contributed by CR Hota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0ebb6b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0ebb6b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0ebb6b3

Branch: refs/heads/trunk
Commit: a0ebb6b39f2932d3ea2fb5e287f52b841e108428
Parents: 0bd4217
Author: Yiqun Lin 
Authored: Wed Aug 29 16:15:22 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Aug 29 16:15:22 2018 +0800

--
 .../federation/router/ConnectionManager.java  | 18 +++---
 .../server/federation/router/RBFConfigKeys.java   |  5 +
 .../src/main/resources/hdfs-rbf-default.xml   |  8 
 3 files changed, 24 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0ebb6b3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index 0b50845..9fb83e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -49,9 +49,6 @@ public class ConnectionManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ConnectionManager.class);
 
-  /** Number of parallel new connections to create. */
-  protected static final int MAX_NEW_CONNECTIONS = 100;
-
   /** Minimum amount of active connections: 50%. */
   protected static final float MIN_ACTIVE_RATIO = 0.5f;
 
@@ -77,8 +74,10 @@ public class ConnectionManager {
   private final Lock writeLock = readWriteLock.writeLock();
 
   /** Queue for creating new connections. */
-  private final BlockingQueue<ConnectionPool> creatorQueue =
-  new ArrayBlockingQueue<>(MAX_NEW_CONNECTIONS);
+  private final BlockingQueue<ConnectionPool> creatorQueue;
+  /** Max size of queue for creating new connections. */
+  private final int creatorQueueMaxSize;
+
   /** Create new connections asynchronously. */
   private final ConnectionCreator creator;
   /** Periodic executor to remove stale connection pools. */
@@ -106,7 +105,12 @@ public class ConnectionManager {
 this.pools = new HashMap<>();
 
 // Create connections in a thread asynchronously
-this.creator = new ConnectionCreator(creatorQueue);
+this.creatorQueueMaxSize = this.conf.getInt(
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT
+);
+this.creatorQueue = new ArrayBlockingQueue<>(this.creatorQueueMaxSize);
+this.creator = new ConnectionCreator(this.creatorQueue);
 this.creator.setDaemon(true);
 
 // Cleanup periods
@@ -213,7 +217,7 @@ public class ConnectionManager {
 if (conn == null || !conn.isUsable()) {
   if (!this.creatorQueue.offer(pool)) {
 LOG.error("Cannot add more than {} connections at the same time",
-MAX_NEW_CONNECTIONS);
+this.creatorQueueMaxSize);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0ebb6b3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 87df5d2..997e1dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -93,6 +93,11 @@ public class RBFConfigKeys extends 
CommonConfigurationKeysPublic {
   TimeUnit.SECONDS.toMillis(5);
 
   // HDFS Router NN client
+  public static final String
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.creator.queue-size";
+  public static final int
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT = 100;
   public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
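
Note: FEDERATION_ROUTER_PREFIX is "dfs.federation.router.", so the new key
resolves to dfs.federation.router.connection.creator.queue-size, defaulting
to 100 (the value of the removed MAX_NEW_CONNECTIONS constant). Since the
creator queue is an ArrayBlockingQueue and offer() never blocks, a full
queue now surfaces through the LOG.error above with the configured bound.
A minimal sketch of overriding the default programmatically (illustrative
only; the value 200 is arbitrary):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;

  public class CreatorQueueSizeExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Allow up to 200 pending connection-creation requests (default: 100).
      conf.setInt(
          RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE,
          200);
      System.out.println(conf.get(
          RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE));
    }
  }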
 

hadoop git commit: HDFS-13634. RBF: Configurable value in xml for async connection request queue size. Contributed by CR Hota.

2018-08-29 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f164568b4 -> b9b2b97e9


HDFS-13634. RBF: Configurable value in xml for async connection request queue 
size. Contributed by CR Hota.

(cherry picked from commit a0ebb6b39f2932d3ea2fb5e287f52b841e108428)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9b2b97e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9b2b97e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9b2b97e

Branch: refs/heads/branch-3.1
Commit: b9b2b97e9de69109d1611a49f6109960f36ca79c
Parents: f164568
Author: Yiqun Lin 
Authored: Wed Aug 29 16:15:22 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Aug 29 16:18:15 2018 +0800

--
 .../federation/router/ConnectionManager.java  | 18 +++---
 .../server/federation/router/RBFConfigKeys.java   |  5 +
 .../src/main/resources/hdfs-rbf-default.xml   |  8 
 3 files changed, 24 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b2b97e/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index 0b50845..9fb83e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -49,9 +49,6 @@ public class ConnectionManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ConnectionManager.class);
 
-  /** Number of parallel new connections to create. */
-  protected static final int MAX_NEW_CONNECTIONS = 100;
-
   /** Minimum amount of active connections: 50%. */
   protected static final float MIN_ACTIVE_RATIO = 0.5f;
 
@@ -77,8 +74,10 @@ public class ConnectionManager {
   private final Lock writeLock = readWriteLock.writeLock();
 
   /** Queue for creating new connections. */
-  private final BlockingQueue<ConnectionPool> creatorQueue =
-  new ArrayBlockingQueue<>(MAX_NEW_CONNECTIONS);
+  private final BlockingQueue<ConnectionPool> creatorQueue;
+  /** Max size of queue for creating new connections. */
+  private final int creatorQueueMaxSize;
+
   /** Create new connections asynchronously. */
   private final ConnectionCreator creator;
   /** Periodic executor to remove stale connection pools. */
@@ -106,7 +105,12 @@ public class ConnectionManager {
 this.pools = new HashMap<>();
 
 // Create connections in a thread asynchronously
-this.creator = new ConnectionCreator(creatorQueue);
+this.creatorQueueMaxSize = this.conf.getInt(
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT
+);
+this.creatorQueue = new ArrayBlockingQueue<>(this.creatorQueueMaxSize);
+this.creator = new ConnectionCreator(this.creatorQueue);
 this.creator.setDaemon(true);
 
 // Cleanup periods
@@ -213,7 +217,7 @@ public class ConnectionManager {
 if (conn == null || !conn.isUsable()) {
   if (!this.creatorQueue.offer(pool)) {
 LOG.error("Cannot add more than {} connections at the same time",
-MAX_NEW_CONNECTIONS);
+this.creatorQueueMaxSize);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b2b97e/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 87df5d2..997e1dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -93,6 +93,11 @@ public class RBFConfigKeys extends 
CommonConfigurationKeysPublic {
   TimeUnit.SECONDS.toMillis(5);
 
   // HDFS Router NN client
+  public static final String
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.creator.queue-size";
+  public static final int
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT = 100;

hadoop git commit: HDFS-13634. RBF: Configurable value in xml for async connection request queue size. Contributed by CR Hota.

2018-08-29 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2a8f4aefd -> 8137b473a


HDFS-13634. RBF: Configurable value in xml for async connection request queue 
size. Contributed by CR Hota.

(cherry picked from commit a0ebb6b39f2932d3ea2fb5e287f52b841e108428)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8137b473
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8137b473
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8137b473

Branch: refs/heads/branch-2
Commit: 8137b473aa6fbe11283dd340894ef537e3b56f8f
Parents: 2a8f4ae
Author: Yiqun Lin 
Authored: Wed Aug 29 16:15:22 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Aug 29 16:23:17 2018 +0800

--
 .../federation/router/ConnectionManager.java  | 18 +++---
 .../server/federation/router/RBFConfigKeys.java   |  5 +
 .../src/main/resources/hdfs-rbf-default.xml   |  8 
 3 files changed, 24 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8137b473/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index f62af1f..2ac07ca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -49,9 +49,6 @@ public class ConnectionManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ConnectionManager.class);
 
-  /** Number of parallel new connections to create. */
-  protected static final int MAX_NEW_CONNECTIONS = 100;
-
   /** Minimum amount of active connections: 50%. */
   protected static final float MIN_ACTIVE_RATIO = 0.5f;
 
@@ -77,8 +74,10 @@ public class ConnectionManager {
   private final Lock writeLock = readWriteLock.writeLock();
 
   /** Queue for creating new connections. */
-  private final BlockingQueue<ConnectionPool> creatorQueue =
-  new ArrayBlockingQueue<>(MAX_NEW_CONNECTIONS);
+  private final BlockingQueue<ConnectionPool> creatorQueue;
+  /** Max size of queue for creating new connections. */
+  private final int creatorQueueMaxSize;
+
   /** Create new connections asynchronously. */
   private final ConnectionCreator creator;
   /** Periodic executor to remove stale connection pools. */
@@ -106,7 +105,12 @@ public class ConnectionManager {
 this.pools = new HashMap<>();
 
 // Create connections in a thread asynchronously
-this.creator = new ConnectionCreator(creatorQueue);
+this.creatorQueueMaxSize = this.conf.getInt(
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT
+);
+this.creatorQueue = new ArrayBlockingQueue<>(this.creatorQueueMaxSize);
+this.creator = new ConnectionCreator(this.creatorQueue);
 this.creator.setDaemon(true);
 
 // Cleanup periods
@@ -213,7 +217,7 @@ public class ConnectionManager {
 if (conn == null || !conn.isUsable()) {
   if (!this.creatorQueue.offer(pool)) {
 LOG.error("Cannot add more than {} connections at the same time",
-MAX_NEW_CONNECTIONS);
+this.creatorQueueMaxSize);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8137b473/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 87df5d2..997e1dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -93,6 +93,11 @@ public class RBFConfigKeys extends 
CommonConfigurationKeysPublic {
   TimeUnit.SECONDS.toMillis(5);
 
   // HDFS Router NN client
+  public static final String
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.creator.queue-size";
+  public static final int
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT = 100;
  

hadoop git commit: YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by Craig Condit

2018-08-29 Thread skumpf
Repository: hadoop
Updated Branches:
  refs/heads/trunk a0ebb6b39 -> 73625168c


YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by 
Craig Condit


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73625168
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73625168
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73625168

Branch: refs/heads/trunk
Commit: 73625168c0f29aa646d7a715c9fb15e43d6c7e05
Parents: a0ebb6b
Author: Shane Kumpf 
Authored: Wed Aug 29 07:08:37 2018 -0600
Committer: Shane Kumpf 
Committed: Wed Aug 29 07:08:37 2018 -0600

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../src/main/resources/yarn-default.xml |   7 +
 .../runtime/DockerLinuxContainerRuntime.java|  38 +
 .../linux/runtime/docker/DockerRunCommand.java  |   5 +
 .../container-executor/impl/utils/docker-util.c |  42 ++
 .../container-executor/impl/utils/docker-util.h |   3 +-
 .../test/utils/test_docker_util.cc  |  64 
 .../runtime/TestDockerContainerRuntime.java | 149 +++
 .../runtime/docker/TestDockerRunCommand.java|   5 +-
 .../src/site/markdown/DockerContainers.md   |   1 +
 10 files changed, 317 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 148edb9..d525e4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2012,6 +2012,11 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_DOCKER_DEFAULT_RW_MOUNTS =
   DOCKER_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts";
 
+  /** The default list of tmpfs mounts to be mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_TMPFS_MOUNTS =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "default-tmpfs-mounts";
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 72e42d8..4262436 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1828,6 +1828,13 @@
   
 
   
+The default list of tmpfs mounts to be mounted into all Docker
+  containers that use DockerContainerRuntime.
+yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts
+
+  
+
+  
 The mode in which the Java Container Sandbox should run 
detailed by
   the JavaSandboxLinuxContainerRuntime.
 yarn.nodemanager.runtime.linux.sandbox-mode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 00771ff..0ae3d0f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop
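
Note: per the yarn-default.xml hunk above, the new property is
yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts and is empty by
default. A minimal sketch of enabling a default tmpfs mount programmatically
(the mount point /run is an arbitrary illustration):

  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  public class TmpfsMountExample {
    public static void main(String[] args) {
      YarnConfiguration conf = new YarnConfiguration();
      // Mount a tmpfs at /run in every container run by DockerContainerRuntime.
      conf.set(YarnConfiguration.NM_DOCKER_DEFAULT_TMPFS_MOUNTS, "/run");
      System.out.println(
          conf.get(YarnConfiguration.NM_DOCKER_DEFAULT_TMPFS_MOUNTS));
    }
  }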

hadoop git commit: YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by Craig Condit

2018-08-29 Thread skumpf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b9b2b97e9 -> b8618556e


YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by 
Craig Condit

(cherry picked from commit 73625168c0f29aa646d7a715c9fb15e43d6c7e05)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8618556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8618556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8618556

Branch: refs/heads/branch-3.1
Commit: b8618556eee0ff966f39480f9cfb6a1e28d4813f
Parents: b9b2b97
Author: Shane Kumpf 
Authored: Wed Aug 29 07:08:37 2018 -0600
Committer: Shane Kumpf 
Committed: Wed Aug 29 07:11:38 2018 -0600

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../src/main/resources/yarn-default.xml |   7 +
 .../runtime/DockerLinuxContainerRuntime.java|  38 +
 .../linux/runtime/docker/DockerRunCommand.java  |   5 +
 .../container-executor/impl/utils/docker-util.c |  42 ++
 .../container-executor/impl/utils/docker-util.h |   3 +-
 .../test/utils/test_docker_util.cc  |  64 
 .../runtime/TestDockerContainerRuntime.java | 149 +++
 .../runtime/docker/TestDockerRunCommand.java|   5 +-
 .../src/site/markdown/DockerContainers.md   |   1 +
 10 files changed, 317 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8618556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index affa76a..9a5ed97 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1959,6 +1959,11 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_DOCKER_DEFAULT_RW_MOUNTS =
   DOCKER_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts";
 
+  /** The default list of tmpfs mounts to be mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_TMPFS_MOUNTS =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "default-tmpfs-mounts";
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8618556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a54f780..412fd02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1804,6 +1804,13 @@
   
 
   
+The default list of tmpfs mounts to be mounted into all Docker
+  containers that use DockerContainerRuntime.
+yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts
+
+  
+
+  
 The mode in which the Java Container Sandbox should run 
detailed by
   the JavaSandboxLinuxContainerRuntime.
 yarn.nodemanager.runtime.linux.sandbox-mode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8618556/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 106aff1..7c2e2b9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/h

hadoop git commit: HDDS-280. Support ozone dist-start-stitching on openbsd/osx. Contributed by Elek, Marton.

2018-08-29 Thread msingh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 73625168c -> 692736f7c


HDDS-280. Support ozone dist-start-stitching on openbsd/osx. Contributed by 
Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/692736f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/692736f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/692736f7

Branch: refs/heads/trunk
Commit: 692736f7cfb72b8932dc2eb4f4faa995dc6521f8
Parents: 7362516
Author: Mukul Kumar Singh 
Authored: Thu Aug 30 02:21:24 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Aug 30 02:21:24 2018 +0530

--
 dev-support/bin/ozone-dist-layout-stitching   |  6 +++---
 dev-support/bin/ozone-dist-tar-stitching  |  9 ++---
 hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh |  2 +-
 .../acceptance-test/dev-support/bin/robot-dnd-all.sh  | 10 ++
 hadoop-ozone/acceptance-test/dev-support/bin/robot.sh |  7 ---
 hadoop-ozone/acceptance-test/pom.xml  |  7 +++
 .../src/test/acceptance/basic/ozone-shell.robot   |  1 -
 .../acceptance-test/src/test/acceptance/commonlib.robot   |  2 +-
 hadoop-ozone/common/pom.xml   |  5 +
 hadoop-ozone/docs/content/GettingStarted.md   |  3 ++-
 hadoop-ozone/pom.xml  |  5 +
 11 files changed, 24 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index 2ba7791..1ba652c 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -117,9 +117,9 @@ ROOT=$(cd "${BASEDIR}"/../..;pwd)
 echo
 echo "Current directory $(pwd)"
 echo
-run rm -rf "ozone"
-run mkdir "ozone"
-run cd "ozone"
+run rm -rf "ozone-${HDDS_VERSION}"
+run mkdir "ozone-${HDDS_VERSION}"
+run cd "ozone-${HDDS_VERSION}"
 run cp -p "${ROOT}/LICENSE.txt" .
 run cp -p "${ROOT}/NOTICE.txt" .
 run cp -p "${ROOT}/README.txt" .

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/dev-support/bin/ozone-dist-tar-stitching
--
diff --git a/dev-support/bin/ozone-dist-tar-stitching 
b/dev-support/bin/ozone-dist-tar-stitching
index d1116e4..93d0525 100755
--- a/dev-support/bin/ozone-dist-tar-stitching
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -36,13 +36,8 @@ function run()
   fi
 }
 
-#To make the final dist directory easily mountable from docker we don't use
-#version name in the directory name.
-#To include the version name in the root directory of the tar file
-# we create a symbolic link and dereference it during the tar creation
-ln -s -f ozone ozone-${VERSION}
-run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
+run tar -c -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
 run gzip -f "ozone-${VERSION}.tar"
 echo
 echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"
-echo
\ No newline at end of file
+echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
--
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh 
b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
index ee9c6b8..87b7137 100755
--- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
@@ -15,4 +15,4 @@
 # limitations under the License.
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-$DIR/robot.sh $DIR/../../src/test/acceptance
+"$DIR/robot.sh" "$DIR/../../src/test/acceptance"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
--
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh 
b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
index 9f1d367..052ffb3 100755
--- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
@@ -18,15 +18,9 @@ set -x
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 
-#Dir od the definition of the dind based test exeucution container
-DOCKERDIR="$DIR/../docker"
-
 #Dir to save the results
 TARGETDIR="$DIR/../../target/dnd"
 
-#Dir to mount the distribution from
-OZONEDIST="$DIR/../../../../hadoop-dist/target/ozone"
-
 #Name and imagename of the temporary, dind based tes

hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-08-29 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 692736f7c -> d53a10b0a


HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d53a10b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d53a10b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d53a10b0

Branch: refs/heads/trunk
Commit: d53a10b0a552155de700e396fd7f450a4c5f9c22
Parents: 692736f
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 13:59:32 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d53a10b0/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable 
interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
 #### Evolving
 





hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-08-29 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b8618556e -> 90c8cca78


HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56
(cherry picked from commit d53a10b0a552155de700e396fd7f450a4c5f9c22)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90c8cca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90c8cca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90c8cca7

Branch: refs/heads/branch-3.1
Commit: 90c8cca78389f534429ebf81576ce7ebba9356f7
Parents: b861855
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 14:00:42 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90c8cca7/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable 
interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
 #### Evolving
 





hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-08-29 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3b0127961 -> fbaa11ef4


HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56
(cherry picked from commit d53a10b0a552155de700e396fd7f450a4c5f9c22)
(cherry picked from commit 90c8cca78389f534429ebf81576ce7ebba9356f7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbaa11ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbaa11ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbaa11ef

Branch: refs/heads/branch-3.0
Commit: fbaa11ef4cde64a410ac0f54693ffa343ad61028
Parents: 3b01279
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 14:01:19 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbaa11ef/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable 
interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
 #### Evolving
 





hadoop git commit: YARN-8051: TestRMEmbeddedElector#testCallbackSynchronization is flakey. Contributed by Robert Kanter and Jason Lowe.

2018-08-29 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8137b473a -> 21aa7f1d8


YARN-8051: TestRMEmbeddedElector#testCallbackSynchronization is flakey. 
Contributed by Robert Kanter and Jason Lowe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21aa7f1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21aa7f1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21aa7f1d

Branch: refs/heads/branch-2
Commit: 21aa7f1d8219329c02cd9faa771adc049270ac70
Parents: 8137b47
Author: Eric E Payne 
Authored: Wed Aug 29 21:30:38 2018 +
Committer: Eric E Payne 
Committed: Wed Aug 29 21:30:38 2018 +

--
 .../resourcemanager/TestRMEmbeddedElector.java  | 112 ++-
 1 file changed, 86 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21aa7f1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 47d18f3..a2b9afd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -17,24 +17,31 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -49,6 +56,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 
   private Configuration conf;
   private AtomicBoolean callbackCalled;
+  private AtomicInteger transitionToActiveCounter;
+  private AtomicInteger transitionToStandbyCounter;
 
   private enum SyncTestType {
 ACTIVE,
@@ -76,6 +85,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 
 callbackCalled = new AtomicBoolean(false);
+transitionToActiveCounter = new AtomicInteger(0);
+transitionToStandbyCounter = new AtomicInteger(0);
   }
 
   /**
@@ -104,7 +115,7 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
*/
   @Test
   public void testCallbackSynchronization()
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 testCallbackSynchronization(SyncTestType.ACTIVE);
 testCallbackSynchronization(SyncTestType.STANDBY);
 testCallbackSynchronization(SyncTestType.NEUTRAL);
@@ -118,9 +129,10 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
* @param type the type of test to run
* @throws IOException if there's an issue transitioning
* @throws InterruptedException if interrupted
+   * @throws TimeoutException if waitFor timeout reached
*/
   private void testCallbackSynchronization(SyncTestType type)
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 AdminService as = mock(AdminService.class);
 RMContext rc = 
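
Note: judging from the imports added above (GenericTestUtils, doAnswer,
Supplier, AtomicInteger), the flakiness fix replaces the atLeast/atMost
verifications with transition counters polled via GenericTestUtils.waitFor.
A condensed sketch of that pattern, where "as" is the mocked AdminService
from the hunk above (counter name and timeouts are illustrative, not the
exact test code):

  // Count transitions as the mock is invoked...
  final AtomicInteger toActiveCount = new AtomicInteger(0);
  doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) {
      toActiveCount.incrementAndGet();
      return null;
    }
  }).when(as).transitionToActive(any(StateChangeRequestInfo.class));

  // ...then wait for the expected count instead of asserting immediately,
  // polling every 100 ms for at most 10 s.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return toActiveCount.get() >= 1;
    }
  }, 100, 10000);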

hadoop git commit: YARN-8051: TestRMEmbeddedElector#testCallbackSynchronization is flakey. Contributed by Robert Kanter and Jason Lowe.

2018-08-29 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 42da1c5cf -> 809faede9


YARN-8051: TestRMEmbeddedElector#testCallbackSynchronization is flakey. 
Contributed by Robert Kanter and Jason Lowe.

(cherry picked from commit 21aa7f1d8219329c02cd9faa771adc049270ac70)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/809faede
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/809faede
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/809faede

Branch: refs/heads/branch-2.9
Commit: 809faede96c0f11c60ff19c6b19e529687a4a2e6
Parents: 42da1c5
Author: Eric E Payne 
Authored: Wed Aug 29 21:30:38 2018 +
Committer: Eric E Payne 
Committed: Wed Aug 29 21:58:16 2018 +

--
 .../resourcemanager/TestRMEmbeddedElector.java  | 112 ++-
 1 file changed, 86 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/809faede/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 47d18f3..a2b9afd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -17,24 +17,31 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -49,6 +56,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 
   private Configuration conf;
   private AtomicBoolean callbackCalled;
+  private AtomicInteger transitionToActiveCounter;
+  private AtomicInteger transitionToStandbyCounter;
 
   private enum SyncTestType {
 ACTIVE,
@@ -76,6 +85,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 
 callbackCalled = new AtomicBoolean(false);
+transitionToActiveCounter = new AtomicInteger(0);
+transitionToStandbyCounter = new AtomicInteger(0);
   }
 
   /**
@@ -104,7 +115,7 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
*/
   @Test
   public void testCallbackSynchronization()
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 testCallbackSynchronization(SyncTestType.ACTIVE);
 testCallbackSynchronization(SyncTestType.STANDBY);
 testCallbackSynchronization(SyncTestType.NEUTRAL);
@@ -118,9 +129,10 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
* @param type the type of test to run
* @throws IOException if there's an issue transitioning
* @throws InterruptedException if interrupted
+   * @throws TimeoutException if waitFor timeout reached
*/
   private void testCallbackSynchronization(SyncTestType type)
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutExcepti

hadoop git commit: YARN-8051: TestRMEmbeddedElector#testCallbackSynchronization is flakey. Contributed by Robert Kanter and Jason Lowe.

2018-08-29 Thread epayne
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 6d0f58b0c -> c7c5d7392


YARN-8051: TestRMEmbeddedElector#testCallbackSynchronization is flakey. 
Contributed by Robert Kanter and Jason Lowe.

(cherry picked from commit 21aa7f1d8219329c02cd9faa771adc049270ac70)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7c5d739
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7c5d739
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7c5d739

Branch: refs/heads/branch-2.8
Commit: c7c5d73925675c3702406794ef8b35ce17d4a8cd
Parents: 6d0f58b
Author: Eric E Payne 
Authored: Wed Aug 29 21:30:38 2018 +
Committer: Eric E Payne 
Committed: Wed Aug 29 22:12:25 2018 +

--
 .../resourcemanager/TestRMEmbeddedElector.java  | 112 ++-
 1 file changed, 86 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7c5d739/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index c4fcc5d..3e5a3b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -17,24 +17,31 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -49,6 +56,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 
   private Configuration conf;
   private AtomicBoolean callbackCalled;
+  private AtomicInteger transitionToActiveCounter;
+  private AtomicInteger transitionToStandbyCounter;
 
   private enum SyncTestType {
 ACTIVE,
@@ -76,6 +85,8 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 
 callbackCalled = new AtomicBoolean(false);
+transitionToActiveCounter = new AtomicInteger(0);
+transitionToStandbyCounter = new AtomicInteger(0);
   }
 
   /**
@@ -104,7 +115,7 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
*/
   @Test
   public void testCallbackSynchronization()
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 testCallbackSynchronization(SyncTestType.ACTIVE);
 testCallbackSynchronization(SyncTestType.STANDBY);
 testCallbackSynchronization(SyncTestType.NEUTRAL);
@@ -118,9 +129,10 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
* @param type the type of test to run
* @throws IOException if there's an issue transitioning
* @throws InterruptedException if interrupted
+   * @throws TimeoutException if waitFor timeout reached
*/
   private void testCallbackSynchronization(SyncTestType type)
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutExcepti

hadoop git commit: HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by Fei Hui.

2018-08-29 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk d53a10b0a -> 582cb10ec


HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by 
Fei Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/582cb10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/582cb10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/582cb10e

Branch: refs/heads/trunk
Commit: 582cb10ec74ed5666946a3769002ceb80ba660cb
Parents: d53a10b
Author: Yiqun Lin 
Authored: Thu Aug 30 11:21:13 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Aug 30 11:21:13 2018 +0800

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/582cb10e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d7f133e..27196c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1397,6 +1397,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   datanode.getMetrics().incrRamDiskBlocksWrite();
 } catch (DiskOutOfSpaceException de) {
   // Ignore the exception since we just fall back to persistent 
storage.
+  LOG.warn("Insufficient space for placing the block on a transient "
+  + "volume, fall back to persistent storage: "
+  + de.getMessage());
 } finally {
   if (ref == null) {
 cacheManager.release(b.getNumBytes());
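
Note: a condensed sketch of the pattern this hunk annotates (the allocation
helper is hypothetical; the other names appear in the hunk above). The
DiskOutOfSpaceException is deliberately swallowed so the block falls back to
persistent storage; the change is that the fallback is now visible as a WARN
in the datanode log instead of happening silently:

  FsVolumeReference ref = null;
  try {
    ref = allocateTransientVolume(b); // hypothetical helper, may throw
    datanode.getMetrics().incrRamDiskBlocksWrite();
  } catch (DiskOutOfSpaceException de) {
    // Previously silent; now surfaced in the log before falling back.
    LOG.warn("Insufficient space for placing the block on a transient "
        + "volume, fall back to persistent storage: " + de.getMessage());
  } finally {
    if (ref == null) {
      // Transient placement failed: release the reserved RAM-disk bytes.
      cacheManager.release(b.getNumBytes());
    }
  }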





hadoop git commit: HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by Fei Hui.

2018-08-29 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 90c8cca78 -> 8a3be0d5a


HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by 
Fei Hui.

(cherry picked from commit 582cb10ec74ed5666946a3769002ceb80ba660cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a3be0d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a3be0d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a3be0d5

Branch: refs/heads/branch-3.1
Commit: 8a3be0d5a4361ade32430f3da99e0b8e2ecb03a9
Parents: 90c8cca
Author: Yiqun Lin 
Authored: Thu Aug 30 11:21:13 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Aug 30 11:22:57 2018 +0800

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a3be0d5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d7f133e..27196c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1397,6 +1397,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         datanode.getMetrics().incrRamDiskBlocksWrite();
       } catch (DiskOutOfSpaceException de) {
         // Ignore the exception since we just fall back to persistent storage.
+        LOG.warn("Insufficient space for placing the block on a transient "
+            + "volume, fall back to persistent storage: "
+            + de.getMessage());
       } finally {
         if (ref == null) {
           cacheManager.release(b.getNumBytes());





hadoop git commit: HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by Fei Hui.

2018-08-29 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fbaa11ef4 -> 6a547856e


HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by 
Fei Hui.

(cherry picked from commit 582cb10ec74ed5666946a3769002ceb80ba660cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a547856
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a547856
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a547856

Branch: refs/heads/branch-3.0
Commit: 6a547856ef205c89129b092e535e9916780ecd37
Parents: fbaa11e
Author: Yiqun Lin 
Authored: Thu Aug 30 11:21:13 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Aug 30 11:24:25 2018 +0800

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a547856/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 1eeec27..c2c25ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1347,6 +1347,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         datanode.getMetrics().incrRamDiskBlocksWrite();
       } catch (DiskOutOfSpaceException de) {
         // Ignore the exception since we just fall back to persistent storage.
+        LOG.warn("Insufficient space for placing the block on a transient "
+            + "volume, fall back to persistent storage: "
+            + de.getMessage());
       } finally {
         if (ref == null) {
           cacheManager.release(b.getNumBytes());





hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-08-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 582cb10ec -> 781437c21


HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/781437c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/781437c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/781437c2

Branch: refs/heads/trunk
Commit: 781437c219dc3422797a32dc7ba72cd4f5ee38e2
Parents: 582cb10
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:07:49 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
     Configuration.addDefaultResource(KMS_DEFAULT_XML);
     Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
     }
     return newer;
   }
+
+  public static void initLogging() {
+    String confDir = System.getProperty(KMS_CONFIG_DIR);
+    if (confDir == null) {
+      throw new RuntimeException("System property '" +
+          KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+    }
+    if (System.getProperty("log4j.configuration") == null) {
+      System.setProperty("log4j.defaultInitOverride", "true");
+      boolean fromClasspath = true;
+      File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+      if (log4jConf.exists()) {
+        PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+        fromClasspath = false;
+      } else {
+        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+        URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+        if (log4jUrl != null) {
+          PropertyConfigurator.configure(log4jUrl);
+        }
+      }
+      LOG.debug("KMS log starting");
+      if (fromClasspath) {
+        LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+        LOG.warn("Logging with INFO level to standard output");
+      }
+    }
+  }
 }
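
Two log4j 1.x details in the new initLogging method are worth a note before the KMSWebApp half of the diff. Setting log4j.defaultInitOverride to true suppresses log4j's automatic classpath-based initialization so the explicit configuration wins, and PropertyConfigurator.configureAndWatch re-reads the properties file on a timer, letting operators change log levels without restarting the server. A self-contained sketch of that bootstrap order follows; the demo-log4j.properties path is a hypothetical stand-in for the KMS config directory lookup.

import java.io.File;

import org.apache.log4j.PropertyConfigurator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Log4jBootstrapSketch {
  public static void main(String[] args) {
    // Keep log4j from auto-configuring off the classpath before the
    // explicit configuration below has a chance to run.
    System.setProperty("log4j.defaultInitOverride", "true");

    // Hypothetical path; KMS resolves kms-log4j.properties against its
    // config directory instead.
    File conf = new File("conf", "demo-log4j.properties");
    if (conf.exists()) {
      // Poll the file every 1000 ms so level changes take effect
      // without a restart.
      PropertyConfigurator.configureAndWatch(conf.getPath(), 1000);
    }

    // Loggers created from this point on see the intended configuration;
    // one created earlier would have bound to log4j's defaults.
    Logger log = LoggerFactory.getLogger(Log4jBootstrapSketch.class);
    log.info("logging initialized from {}", conf.getAbsolutePath());
  }
}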

http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cb4bf7e..0640e25 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -37,14 +35,13 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
-import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public cl
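
The diffstat also lists a one-line change to KMSWebServer.java, and while this message is truncated before that hunk, the natural wiring is that the server entry point now calls the new initializer before anything else touches a logger. A sketch of that call site; the surrounding main-method shape is assumed here, not taken from the patch.

public class KMSWebServer {
  public static void main(String[] args) throws Exception {
    // Assumed placement: run before the first LoggerFactory.getLogger()
    // call anywhere in the KMS startup path, so startup messages land in
    // the kms-log4j.properties-configured appenders instead of defaults.
    KMSConfiguration.initLogging();
    // ... remainder of server startup unchanged ...
  }
}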

hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-08-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 8a3be0d5a -> 12eb9cc3b


HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.

(cherry picked from commit 781437c219dc3422797a32dc7ba72cd4f5ee38e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12eb9cc3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12eb9cc3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12eb9cc3

Branch: refs/heads/branch-3.1
Commit: 12eb9cc3bb7c07a7ff2026b704d26e7a68fcd622
Parents: 8a3be0d
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:09:05 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12eb9cc3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
     Configuration.addDefaultResource(KMS_DEFAULT_XML);
     Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
     }
     return newer;
   }
+
+  public static void initLogging() {
+    String confDir = System.getProperty(KMS_CONFIG_DIR);
+    if (confDir == null) {
+      throw new RuntimeException("System property '" +
+          KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+    }
+    if (System.getProperty("log4j.configuration") == null) {
+      System.setProperty("log4j.defaultInitOverride", "true");
+      boolean fromClasspath = true;
+      File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+      if (log4jConf.exists()) {
+        PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+        fromClasspath = false;
+      } else {
+        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+        URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+        if (log4jUrl != null) {
+          PropertyConfigurator.configure(log4jUrl);
+        }
+      }
+      LOG.debug("KMS log starting");
+      if (fromClasspath) {
+        LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+        LOG.warn("Logging with INFO level to standard output");
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12eb9cc3/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cb4bf7e..0640e25 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -37,14 +35,13 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
-import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.L

hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-08-29 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6a547856e -> fa32269ce


HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.

(cherry picked from commit 781437c219dc3422797a32dc7ba72cd4f5ee38e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa32269c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa32269c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa32269c

Branch: refs/heads/branch-3.0
Commit: fa32269cee5ec7125fb7e6d06c49716fdfe00af9
Parents: 6a54785
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:09:13 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa32269c/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
     Configuration.addDefaultResource(KMS_DEFAULT_XML);
     Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
     }
     return newer;
   }
+
+  public static void initLogging() {
+    String confDir = System.getProperty(KMS_CONFIG_DIR);
+    if (confDir == null) {
+      throw new RuntimeException("System property '" +
+          KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+    }
+    if (System.getProperty("log4j.configuration") == null) {
+      System.setProperty("log4j.defaultInitOverride", "true");
+      boolean fromClasspath = true;
+      File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+      if (log4jConf.exists()) {
+        PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+        fromClasspath = false;
+      } else {
+        ClassLoader cl = Thread.currentThread().getContextClassLoader();
+        URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+        if (log4jUrl != null) {
+          PropertyConfigurator.configure(log4jUrl);
+        }
+      }
+      LOG.debug("KMS log starting");
+      if (fromClasspath) {
+        LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+        LOG.warn("Logging with INFO level to standard output");
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa32269c/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index 9a71fa2..571d675 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -36,14 +34,13 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
-import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.L