hadoop git commit: HDDS-268. Add SCM close container watcher. Contributed by Ajay Kumar.

2018-09-04 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6ccb809c2 -> 85c3fe341


HDDS-268. Add SCM close container watcher. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85c3fe34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85c3fe34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85c3fe34

Branch: refs/heads/trunk
Commit: 85c3fe341a77bc1a74fdc7af64e18e4557fa8e96
Parents: 6ccb809
Author: Xiaoyu Yao 
Authored: Tue Sep 4 22:56:42 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Sep 4 22:56:57 2018 -0700

--
 .../hadoop/hdds/server/events/EventWatcher.java |  20 +-
 .../scm/command/CommandStatusReportHandler.java |   6 +-
 .../container/CloseContainerEventHandler.java   |  26 ++
 .../scm/container/CloseContainerWatcher.java| 100 +++
 .../hadoop/hdds/scm/events/SCMEvents.java   |  11 +
 .../scm/server/StorageContainerManager.java |   8 +
 .../container/TestCloseContainerWatcher.java| 287 +++
 7 files changed, 450 insertions(+), 8 deletions(-)
--
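
For readers skimming the diffstat: the new CloseContainerWatcher follows the EventWatcher lease pattern. Below is a minimal, self-contained sketch of that pattern with illustrative names (a plain map stands in for the HDDS lease manager; this is not the HDDS API itself).

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class CloseContainerWatcherSketch {
  // containerId -> deadline (millis); stands in for the HDDS lease manager.
  private final Map<Long, Long> pending = new ConcurrentHashMap<>();

  // Called when SCM fires a close-container command for a container.
  void onCloseIssued(long containerId, long timeoutMs) {
    pending.put(containerId, System.currentTimeMillis() + timeoutMs);
  }

  // Called when the datanode reports the matching close-container status.
  void onCloseAck(long containerId) {
    pending.remove(containerId);
  }

  // Periodic sweep: anything past its deadline is retried and dropped.
  void checkTimeouts(Runnable retry) {
    long now = System.currentTimeMillis();
    pending.entrySet().removeIf(entry -> {
      if (entry.getValue() < now) {
        retry.run();
        return true;
      }
      return false;
    });
  }
}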


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85c3fe34/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
--
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
index e3fee63..ba5078b 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
@@ -102,13 +102,13 @@ public abstract class EventWatcher {
-  long id = completionPayload.getId();
   try {
-handleCompletion(id, publisher);
+handleCompletion(completionPayload, publisher);
   } catch (LeaseNotFoundException e) {
 //It's already done. Too late, we already retried it.
 //Not a real problem.
-LOG.warn("Completion event without active lease. Id={}", id);
+LOG.warn("Completion event without active lease. Id={}",
+completionPayload.getId());
   }
 });
 
@@ -140,9 +140,11 @@ public abstract class EventWatcher

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85c3fe34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
index 9413a46..054665a 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
@@ -103,7 +103,7 @@ public class CommandStatusReportHandler implements
* Wrapper event for Replicate Command.
*/
   public static class ReplicationStatus extends CommandStatusEvent {
-ReplicationStatus(CommandStatus cmdStatus) {
+public ReplicationStatus(CommandStatus cmdStatus) {
   super(cmdStatus);
 }
   }
@@ -112,7 +112,7 @@ public class CommandStatusReportHandler implements
* Wrapper event for CloseContainer Command.
*/
   public static class CloseContainerStatus extends CommandStatusEvent {
-CloseContainerStatus(CommandStatus cmdStatus) {
+public CloseContainerStatus(CommandStatus cmdStatus) {
   super(cmdStatus);
 }
   }
@@ -121,7 +121,7 @@ public class CommandStatusReportHandler implements
* Wrapper event for DeleteBlock Command.
*/
   public static class DeleteBlockCommandStatus extends CommandStatusEvent {
-DeleteBlockCommandStatus(CommandStatus cmdStatus) {
+public DeleteBlockCommandStatus(CommandStatus cmdStatus) {
   super(cmdStatus);
 }
   }

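The only change in this file is widening the wrapper-event constructors from package-private to public. A hedged sketch of why: the new watcher lives in org.apache.hadoop.hdds.scm.container, so it has to construct these events from outside the command package. Illustrative call site; the import paths here are assumptions based on this patch, not verified against the full tree.

package org.apache.hadoop.hdds.scm.container;

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.CloseContainerStatus;

final class CloseContainerStatusFactory {
  private CloseContainerStatusFactory() { }

  // Compiles only with the public constructor introduced by this patch.
  static CloseContainerStatus toEvent(CommandStatus cmdStatus) {
    return new CloseContainerStatus(cmdStatus);
  }
}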
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85c3fe34/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 863907e..b94ce4f 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -23,12 +2

hadoop git commit: HADOOP-10219. ipc.Client.setupIOstreams() needs to check for ClientCache.stopClient requested shutdowns. Contributed by Kihwal Lee and Lukas Majercak.

2018-09-04 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 25337c045 -> 9ee9d68e4


HADOOP-10219. ipc.Client.setupIOstreams() needs to check for 
ClientCache.stopClient requested shutdowns.
Contributed by Kihwal Lee and Lukas Majercak.

(cherry picked from commit 9e96ac666d783376a8cdea9c3cc84098c5bdcb56)
(cherry picked from commit 142d878c902da6bc02d0c21cdd5ca8674da7f5c8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ee9d68e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ee9d68e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ee9d68e

Branch: refs/heads/branch-3.0
Commit: 9ee9d68e4b8a86e265a7dc9b6a3ad71b6ceb169b
Parents: 25337c0
Author: Steve Loughran 
Authored: Tue Sep 4 16:46:12 2018 +0100
Committer: Wei-Chiu Chuang 
Committed: Tue Sep 4 22:25:24 2018 -0700

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 14 ++
 .../java/org/apache/hadoop/ipc/TestIPC.java | 45 
 2 files changed, 59 insertions(+)
--
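
The fix boils down to remembering which thread is blocked in connection setup so that stopClient() can interrupt it instead of waiting out the connect timeout. A self-contained sketch of the pattern (a simplified stand-in class, not the Hadoop ipc.Client itself):

import java.util.concurrent.atomic.AtomicReference;

class ConnectingThreadTracker {
  private final AtomicReference<Thread> connectingThread = new AtomicReference<>();

  // Stand-in for setupIOstreams(): record who is connecting, clear in finally.
  void setupConnection() throws InterruptedException {
    connectingThread.set(Thread.currentThread());
    try {
      Thread.sleep(10_000L); // stands in for the blocking socket/SASL setup
    } finally {
      connectingThread.set(null);
    }
  }

  // Stand-in for the new interruptConnectingThread() called from stopClient().
  void interruptConnectingThread() {
    Thread t = connectingThread.get();
    if (t != null) {
      t.interrupt();
    }
  }
}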


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ee9d68e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index a0417d6..9ee0647 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -70,6 +70,7 @@ import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
 import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
@@ -438,6 +439,8 @@ public class Client implements AutoCloseable {
 
 private final Object sendRpcRequestLock = new Object();
 
+    private AtomicReference<Thread> connectingThread = new AtomicReference<>();
+
 public Connection(ConnectionId remoteId, int serviceClass) throws 
IOException {
   this.remoteId = remoteId;
   this.server = remoteId.getAddress();
@@ -775,6 +778,7 @@ public class Client implements AutoCloseable {
 }
   }
   try {
+connectingThread.set(Thread.currentThread());
 if (LOG.isDebugEnabled()) {
   LOG.debug("Connecting to "+server);
 }
@@ -860,6 +864,8 @@ public class Client implements AutoCloseable {
   markClosed(new IOException("Couldn't set up IO streams: " + t, t));
 }
 close();
+  } finally {
+connectingThread.set(null);
   }
 }
 
@@ -1213,6 +1219,13 @@ public class Client implements AutoCloseable {
 notifyAll();
   }
 }
+
+private void interruptConnectingThread() {
+  Thread connThread = connectingThread.get();
+  if (connThread != null) {
+connThread.interrupt();
+  }
+}
 
 /** Close the connection. */
 private synchronized void close() {
@@ -1315,6 +1328,7 @@ public class Client implements AutoCloseable {
 // wake up all connections
 for (Connection conn : connections.values()) {
   conn.interrupt();
+  conn.interruptConnectingThread();
 }
 
 // wait until all connections are closed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ee9d68e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index a4577f2..cdbaea4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -1398,6 +1399,50 @@ public class TestIPC {
 assertEquals(Client.getTimeout(config), -1);
   }
 
+  @Test(timeout=60000)
+  public void testSetupConnectionShouldNotBlockShutdown() throws Exception {
+// Start server
+SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+  

hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 a9d86c526 -> 0a3eefeda


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a3eefed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a3eefed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a3eefed

Branch: refs/heads/branch-2.7
Commit: 0a3eefeda1bf13d84fc4472027499168ddd8407f
Parents: a9d86c5
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:33 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a3eefed/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 72c125d..9447e41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -243,7 +243,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 
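For anyone tuning this: the property whose description is corrected here is dfs.namenode.path.based.cache.refresh.interval.ms (the key name is not shown in this hunk; it is taken from the HDFS caching docs). A minimal example of overriding it, assuming hadoop-common on the classpath:

import org.apache.hadoop.conf.Configuration;

public class CacheRescanInterval {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Documented default is 30000 ms (thirty seconds), as the corrected text says.
    conf.setLong("dfs.namenode.path.based.cache.refresh.interval.ms", 30000L);
    System.out.println(conf.get("dfs.namenode.path.based.cache.refresh.interval.ms"));
  }
}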





hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 54d3189cf -> 25337c045


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25337c04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25337c04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25337c04

Branch: refs/heads/branch-3.0
Commit: 25337c045a44f7f837574d0a91a655bfb2e04048
Parents: 54d3189
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:54:56 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25337c04/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 





hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 2e869cbce -> 5cd0b8038


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cd0b803
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cd0b803
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cd0b803

Branch: refs/heads/branch-2.9
Commit: 5cd0b8038cc81a38ea2dbaa42e50038034b3ecee
Parents: 2e869cb
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:16 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cd0b803/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 





hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 c7c5d7392 -> 1359e8da7


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1359e8da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1359e8da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1359e8da

Branch: refs/heads/branch-2.8
Commit: 1359e8da770aaf6fba35320dac352f9a362394d3
Parents: c7c5d73
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:25 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1359e8da/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 72c125d..9447e41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -243,7 +243,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 





hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e744bd31 -> 0468b6e73


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0468b6e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0468b6e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0468b6e7

Branch: refs/heads/branch-2
Commit: 0468b6e7361fec0882c45358dd83385a1b13e5c7
Parents: 2e744bd
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:55:09 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0468b6e7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 





hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 a26565960 -> fde3b5ac2


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 
(cherry picked from commit 6ccb809c2d38a45e716153ba16e135cb76167b2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fde3b5ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fde3b5ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fde3b5ac

Branch: refs/heads/branch-3.1
Commit: fde3b5ac227d07572637d39447b0ab833f0f73af
Parents: a265659
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:54:43 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fde3b5ac/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 





hadoop git commit: HDFS-13812. Fix the inconsistent default refresh interval on Caching documentation. Contributed by Hrishikesh Gadre.

2018-09-04 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6883fe860 -> 6ccb809c2


HDFS-13812. Fix the inconsistent default refresh interval on Caching 
documentation. Contributed by Hrishikesh Gadre.

Signed-off-by: Xiao Chen 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ccb809c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ccb809c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ccb809c

Branch: refs/heads/trunk
Commit: 6ccb809c2d38a45e716153ba16e135cb76167b2b
Parents: 6883fe8
Author: Hrishikesh Gadre 
Authored: Tue Sep 4 21:48:15 2018 -0700
Committer: Xiao Chen 
Committed: Tue Sep 4 21:53:42 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ccb809c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
index 89ad670..f2de043 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/CentralizedCacheManagement.md
@@ -220,7 +220,7 @@ The following properties are not required, but may be 
specified for tuning:
 
 The NameNode will use this as the amount of milliseconds between 
subsequent path cache rescans. This calculates the blocks to cache and each 
DataNode containing a replica of the block that should cache it.
 
-By default, this parameter is set to 300000, which is five minutes.
+By default, this parameter is set to 30000, which is thirty seconds.
 
 *   dfs.datanode.fsdatasetcache.max.threads.per.volume
 





hadoop git commit: HDDS-383. Ozone Client should discard preallocated blocks from closed containers. Contributed by Shashikant Banerjee

2018-09-04 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e4c73147 -> 6883fe860


HDDS-383. Ozone Client should discard preallocated blocks from closed 
containers.  Contributed by Shashikant Banerjee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6883fe86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6883fe86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6883fe86

Branch: refs/heads/trunk
Commit: 6883fe860f484da2b835f9f57307b84165ed7f6f
Parents: 6e4c731
Author: Tsz Wo Nicholas Sze 
Authored: Tue Sep 4 17:10:10 2018 -0700
Committer: Tsz Wo Nicholas Sze 
Committed: Tue Sep 4 17:10:44 2018 -0700

--
 .../ozone/client/io/ChunkGroupOutputStream.java | 86 +---
 .../hadoop/ozone/om/helpers/OmKeyInfo.java  |  4 +
 .../rpc/TestCloseContainerHandlingByClient.java | 64 ++-
 3 files changed, 102 insertions(+), 52 deletions(-)
--
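
The core idea of the client change: stop maintaining a mutable locationInfoList next to the streams and instead derive block locations from the stream entries on demand, so blocks that were preallocated but never written (e.g. in closed containers) simply report length 0 and can be discarded. A simplified, self-contained sketch (long ids stand in for BlockID/OmKeyLocationInfo):

import java.util.ArrayList;
import java.util.List;

class BlockLocationSketch {
  static class StreamEntry {
    final long blockId;
    long currentPosition; // bytes actually written so far

    StreamEntry(long blockId) {
      this.blockId = blockId;
    }
  }

  // Derive {blockId, length} pairs on demand; blocks never written report 0
  // and can be filtered out instead of being tracked in a parallel list.
  static List<long[]> getLocationInfoList(List<StreamEntry> streamEntries) {
    List<long[]> locations = new ArrayList<>();
    for (StreamEntry entry : streamEntries) {
      locations.add(new long[] {entry.blockId, entry.currentPosition});
    }
    return locations;
  }
}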


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6883fe86/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 21406b5..3742a9a 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -53,6 +53,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
+import java.util.ListIterator;
 
 /**
  * Maintaining a list of ChunkInputStream. Write based on offset.
@@ -81,7 +82,6 @@ public class ChunkGroupOutputStream extends OutputStream {
   private final int chunkSize;
   private final String requestID;
   private boolean closed;
-  private List<OmKeyLocationInfo> locationInfoList;
   private final RetryPolicy retryPolicy;
   /**
* A constructor for testing purpose only.
@@ -97,7 +97,6 @@ public class ChunkGroupOutputStream extends OutputStream {
 chunkSize = 0;
 requestID = null;
 closed = false;
-locationInfoList = null;
 retryPolicy = null;
   }
 
@@ -118,9 +117,16 @@ public class ChunkGroupOutputStream extends OutputStream {
 return streamEntries;
   }
 
-  @VisibleForTesting
-  public long getOpenID() {
-return openID;
+  public List<OmKeyLocationInfo> getLocationInfoList() {
+    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
+for (ChunkOutputStreamEntry streamEntry : streamEntries) {
+  OmKeyLocationInfo info =
+  new OmKeyLocationInfo.Builder().setBlockID(streamEntry.blockID)
+  .setShouldCreateContainer(false)
+  .setLength(streamEntry.currentPosition).setOffset(0).build();
+  locationInfoList.add(info);
+}
+return locationInfoList;
   }
 
   public ChunkGroupOutputStream(
@@ -146,7 +152,6 @@ public class ChunkGroupOutputStream extends OutputStream {
 this.xceiverClientManager = xceiverClientManager;
 this.chunkSize = chunkSize;
 this.requestID = requestId;
-this.locationInfoList = new ArrayList<>();
 this.retryPolicy = retryPolicy;
 LOG.debug("Expecting open key with one block, but got" +
 info.getKeyLocationVersions().size());
@@ -211,18 +216,6 @@ public class ChunkGroupOutputStream extends OutputStream {
 streamEntries.add(new ChunkOutputStreamEntry(subKeyInfo.getBlockID(),
 keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID,
 chunkSize, subKeyInfo.getLength()));
-// reset the original length to zero here. It will be updated as and when
-// the data gets written.
-subKeyInfo.setLength(0);
-locationInfoList.add(subKeyInfo);
-  }
-
-  private void incrementBlockLength(int index, long length) {
-if (locationInfoList != null) {
-  OmKeyLocationInfo locationInfo = locationInfoList.get(index);
-  long originalLength = locationInfo.getLength();
-  locationInfo.setLength(originalLength + length);
-}
   }
 
   @VisibleForTesting
@@ -298,7 +291,6 @@ public class ChunkGroupOutputStream extends OutputStream {
   throw ioe;
 }
   }
-  incrementBlockLength(currentStreamIndex, writeLen);
   if (current.getRemaining() <= 0) {
 // since the current block is already written close the stream.
 handleFlushOrClose(true);
@@ -316,12 +308,6 @@ public class ChunkGroupOutputStream extends OutputStream {
 ContainerProtos.GetCommittedBlockLengthResponseProto responseProto;
 RetryPolicy.RetryAction action;
 int numRetries = 0;
-
-// TODO : At this point of time, we also need to allocate new blocks
-// from a

hadoop git commit: HDDS-396. Remove openContainers.db from SCM. Contributed by Dinesh Chitlangia.

2018-09-04 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9964e33e8 -> 6e4c73147


HDDS-396. Remove openContainers.db from SCM.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e4c7314
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e4c7314
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e4c7314

Branch: refs/heads/trunk
Commit: 6e4c73147185ae2e5529028c552c47d1edcead36
Parents: 9964e33
Author: Anu Engineer 
Authored: Tue Sep 4 16:27:31 2018 -0700
Committer: Anu Engineer 
Committed: Tue Sep 4 16:27:31 2018 -0700

--
 .../src/main/java/org/apache/hadoop/ozone/OzoneConsts.java   | 1 -
 .../src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java| 4 
 2 files changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4c7314/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index eb37b79..bf4508b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -92,7 +92,6 @@ public final class OzoneConsts {
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e4c7314/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
--
diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 080840a..522fea9 100644
--- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -59,7 +59,6 @@ import static 
org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
  * This is the CLI that can be use to convert an ozone metadata DB into
@@ -270,9 +269,6 @@ public class SQLCLI  extends Configured implements Tool {
 if (dbName.toString().endsWith(CONTAINER_DB_SUFFIX)) {
   LOG.info("Converting container DB");
   convertContainerDB(dbPath, outPath);
-} else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
-  LOG.info("Converting open container DB");
-  convertOpenContainerDB(dbPath, outPath);
 } else if (dbName.toString().equals(OM_DB_NAME)) {
   LOG.info("Converting om DB");
   convertOMDB(dbPath, outPath);





hadoop git commit: HDDS-369. Remove the containers of a dead node from the container state map. Contributed by Elek, Marton

2018-09-04 Thread hanishakoneru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 54f204459 -> 9964e33e8


HDDS-369. Remove the containers of a dead node from the container state map. 
Contributed by Elek, Marton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9964e33e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9964e33e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9964e33e

Branch: refs/heads/trunk
Commit: 9964e33e8df1a6574d106c22fcaf339db8d48750
Parents: 54f2044
Author: Hanisha Koneru 
Authored: Tue Sep 4 14:57:54 2018 -0700
Committer: Hanisha Koneru 
Committed: Tue Sep 4 14:57:54 2018 -0700

--
 .../hadoop/hdds/scm/node/DeadNodeHandler.java   |  41 ++-
 .../hdds/scm/node/states/Node2ContainerMap.java |  29 +++--
 .../scm/server/StorageContainerManager.java |   3 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  | 112 +++
 4 files changed, 166 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9964e33e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
index 427aef8..c853b3b 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,11 +18,19 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
+import java.util.Set;
+
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Handles Dead Node event.
  */
@@ -30,13 +38,34 @@ public class DeadNodeHandler implements EventHandler<DatanodeDetails> {
 
   private final Node2ContainerMap node2ContainerMap;
 
-  public DeadNodeHandler(Node2ContainerMap node2ContainerMap) {
+  private final ContainerStateManager containerStateManager;
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(DeadNodeHandler.class);
+
+  public DeadNodeHandler(
+  Node2ContainerMap node2ContainerMap,
+  ContainerStateManager containerStateManager) {
 this.node2ContainerMap = node2ContainerMap;
+this.containerStateManager = containerStateManager;
   }
 
   @Override
   public void onMessage(DatanodeDetails datanodeDetails,
-EventPublisher publisher) {
-//TODO: add logic to handle dead node.
+  EventPublisher publisher) {
+    Set<ContainerID> containers =
+node2ContainerMap.getContainers(datanodeDetails.getUuid());
+    LOG.info(
+        "Datanode {}  is dead. Removing replications from the in-memory state.",
+        datanodeDetails.getUuid());
+for (ContainerID container : containers) {
+  try {
+containerStateManager.removeContainerReplica(container,
+datanodeDetails);
+  } catch (SCMException e) {
+LOG.error("Can't remove container from containerStateMap {}", container
+.getId(), e);
+  }
+}
   }
 }

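A minimal sketch of the handler logic above with self-contained stand-ins for the HDDS types (Map/Set instead of Node2ContainerMap, an interface instead of ContainerStateManager): on a dead-node event, each replica is removed independently and failures are logged rather than aborting the loop.

import java.util.Collections;
import java.util.Map;
import java.util.Set;

class DeadNodeSketch {
  interface ReplicaStore {
    void removeReplica(long containerId, String nodeId) throws Exception;
  }

  static void onDeadNode(String nodeId, Map<String, Set<Long>> node2Containers,
      ReplicaStore store) {
    for (long containerId
        : node2Containers.getOrDefault(nodeId, Collections.emptySet())) {
      try {
        store.removeReplica(containerId, nodeId);
      } catch (Exception e) {
        // Mirrors the patch: log and continue with the remaining containers.
        System.err.println("Can't remove container replica " + containerId
            + " for node " + nodeId + ": " + e);
      }
    }
  }
}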
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9964e33e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
index d4d475e..97c254b 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -18,10 +18,6 @@
 
 package org.apa

[4/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

2018-09-04 Thread inigoiri
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. 
Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e744bd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e744bd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e744bd3

Branch: refs/heads/branch-2
Commit: 2e744bd31d68a8d7aec258a18873f220502d9f54
Parents: 5902c06
Author: Inigo Goiri 
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Sep 4 12:18:38 2018 -0700

--
 .../federation/resolver/MountTableResolver.java | 39 +++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java  |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml |  8 
 .../resolver/TestMountTableResolver.java| 26 +
 5 files changed, 76 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e744bd3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index d45441f..bdd75c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE;
@@ -95,6 +97,8 @@ public class MountTableResolver
 
   /** Default nameservice when no mount matches the math. */
   private String defaultNameService = "";
+  /** If use default nameservice to read and write files. */
+  private boolean defaultNSEnable = true;
 
   /** Synchronization for both the tree and the cache. */
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
@@ -163,6 +167,10 @@ public class MountTableResolver
 DFS_ROUTER_DEFAULT_NAMESERVICE,
 DFSUtil.getNamenodeNameServiceId(conf));
 
+this.defaultNSEnable = conf.getBoolean(
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE,
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT);
+
 if (defaultNameService == null) {
   LOG.warn(
   "{} and {} is not set. Fallback to {} as the default name service.",
@@ -176,9 +184,12 @@ public class MountTableResolver
 }
 
 if (this.defaultNameService.equals("")) {
+  this.defaultNSEnable = false;
   LOG.warn("Default name service is not set.");
 } else {
-  LOG.info("Default name service: {}", this.defaultNameService);
+  String enable = this.defaultNSEnable ? "enabled" : "disabled";
+  LOG.info("Default name service: {}, {} to read or write",
+  this.defaultNameService, enable);
 }
   }
 
@@ -406,13 +417,17 @@ public class MountTableResolver
* @param path Path to check/insert.
* @return New remote location.
*/
-  public PathLocation lookupLocation(final String path) {
+  public PathLocation lookupLocation(final String path) throws IOException {
 PathLocation ret = null;
 MountTable entry = findDeepest(path);
 if (entry != null) {
   ret = buildLocation(path, entry);
 } else {
   // Not found, use default location
+  if (!defaultNSEnable) {
+throw new IOException("Cannot find locations for " + path + ", " +
+"because the default nameservice is disabled to read or write");
+  }
   RemoteLocation remoteLocation =
   new RemoteLoca

[5/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

2018-09-04 Thread inigoiri
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. 
Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e869cbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e869cbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e869cbc

Branch: refs/heads/branch-2.9
Commit: 2e869cbce48d41d2e6d4550f0ce238a13f919a0c
Parents: 6ed97eb
Author: Inigo Goiri 
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Sep 4 12:18:49 2018 -0700

--
 .../federation/resolver/MountTableResolver.java | 39 +++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java  |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml |  8 
 .../resolver/TestMountTableResolver.java| 26 +
 5 files changed, 76 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e869cbc/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index d45441f..bdd75c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE;
@@ -95,6 +97,8 @@ public class MountTableResolver
 
   /** Default nameservice when no mount matches the math. */
   private String defaultNameService = "";
+  /** If use default nameservice to read and write files. */
+  private boolean defaultNSEnable = true;
 
   /** Synchronization for both the tree and the cache. */
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
@@ -163,6 +167,10 @@ public class MountTableResolver
 DFS_ROUTER_DEFAULT_NAMESERVICE,
 DFSUtil.getNamenodeNameServiceId(conf));
 
+this.defaultNSEnable = conf.getBoolean(
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE,
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT);
+
 if (defaultNameService == null) {
   LOG.warn(
   "{} and {} is not set. Fallback to {} as the default name service.",
@@ -176,9 +184,12 @@ public class MountTableResolver
 }
 
 if (this.defaultNameService.equals("")) {
+  this.defaultNSEnable = false;
   LOG.warn("Default name service is not set.");
 } else {
-  LOG.info("Default name service: {}", this.defaultNameService);
+  String enable = this.defaultNSEnable ? "enabled" : "disabled";
+  LOG.info("Default name service: {}, {} to read or write",
+  this.defaultNameService, enable);
 }
   }
 
@@ -406,13 +417,17 @@ public class MountTableResolver
* @param path Path to check/insert.
* @return New remote location.
*/
-  public PathLocation lookupLocation(final String path) {
+  public PathLocation lookupLocation(final String path) throws IOException {
 PathLocation ret = null;
 MountTable entry = findDeepest(path);
 if (entry != null) {
   ret = buildLocation(path, entry);
 } else {
   // Not found, use default location
+  if (!defaultNSEnable) {
+throw new IOException("Cannot find locations for " + path + ", " +
+"because the default nameservice is disabled to read or write");
+  }
   RemoteLocation remoteLocation =
   new RemoteLo

[3/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

2018-09-04 Thread inigoiri
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. 
Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54d3189c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54d3189c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54d3189c

Branch: refs/heads/branch-3.0
Commit: 54d3189cfaf58f366157088a7c6f3d44f026c485
Parents: 5514f02
Author: Inigo Goiri 
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Sep 4 12:18:08 2018 -0700

--
 .../federation/resolver/MountTableResolver.java | 39 +++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java  |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml |  8 
 .../resolver/TestMountTableResolver.java| 26 +
 5 files changed, 76 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54d3189c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index d45441f..bdd75c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE;
@@ -95,6 +97,8 @@ public class MountTableResolver
 
   /** Default nameservice when no mount matches the math. */
   private String defaultNameService = "";
+  /** If use default nameservice to read and write files. */
+  private boolean defaultNSEnable = true;
 
   /** Synchronization for both the tree and the cache. */
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
@@ -163,6 +167,10 @@ public class MountTableResolver
 DFS_ROUTER_DEFAULT_NAMESERVICE,
 DFSUtil.getNamenodeNameServiceId(conf));
 
+this.defaultNSEnable = conf.getBoolean(
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE,
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT);
+
 if (defaultNameService == null) {
   LOG.warn(
   "{} and {} is not set. Fallback to {} as the default name service.",
@@ -176,9 +184,12 @@ public class MountTableResolver
 }
 
 if (this.defaultNameService.equals("")) {
+  this.defaultNSEnable = false;
   LOG.warn("Default name service is not set.");
 } else {
-  LOG.info("Default name service: {}", this.defaultNameService);
+  String enable = this.defaultNSEnable ? "enabled" : "disabled";
+  LOG.info("Default name service: {}, {} to read or write",
+  this.defaultNameService, enable);
 }
   }
 
@@ -406,13 +417,17 @@ public class MountTableResolver
* @param path Path to check/insert.
* @return New remote location.
*/
-  public PathLocation lookupLocation(final String path) {
+  public PathLocation lookupLocation(final String path) throws IOException {
 PathLocation ret = null;
 MountTable entry = findDeepest(path);
 if (entry != null) {
   ret = buildLocation(path, entry);
 } else {
   // Not found, use default location
+  if (!defaultNSEnable) {
+throw new IOException("Cannot find locations for " + path + ", " +
+"because the default nameservice is disabled to read or write");
+  }
   RemoteLocation remoteLocation =
   new RemoteLo

[2/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

2018-09-04 Thread inigoiri
HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. 
Contributed by yanghuafeng.

(cherry picked from commit 54f2044595206455484284b43e5976c8a1982aaf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2656596
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2656596
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2656596

Branch: refs/heads/branch-3.1
Commit: a26565960a2d3394d4b31528241ee6ae2c85b91a
Parents: 142d878
Author: Inigo Goiri 
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Sep 4 12:17:49 2018 -0700

--
 .../federation/resolver/MountTableResolver.java | 39 +++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java  |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml |  8 
 .../resolver/TestMountTableResolver.java| 26 +
 5 files changed, 76 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2656596/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index d45441f..bdd75c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE;
@@ -95,6 +97,8 @@ public class MountTableResolver
 
   /** Default nameservice when no mount matches the math. */
   private String defaultNameService = "";
+  /** If use default nameservice to read and write files. */
+  private boolean defaultNSEnable = true;
 
   /** Synchronization for both the tree and the cache. */
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
@@ -163,6 +167,10 @@ public class MountTableResolver
 DFS_ROUTER_DEFAULT_NAMESERVICE,
 DFSUtil.getNamenodeNameServiceId(conf));
 
+this.defaultNSEnable = conf.getBoolean(
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE,
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT);
+
 if (defaultNameService == null) {
   LOG.warn(
   "{} and {} is not set. Fallback to {} as the default name service.",
@@ -176,9 +184,12 @@ public class MountTableResolver
 }
 
 if (this.defaultNameService.equals("")) {
+  this.defaultNSEnable = false;
   LOG.warn("Default name service is not set.");
 } else {
-  LOG.info("Default name service: {}", this.defaultNameService);
+  String enable = this.defaultNSEnable ? "enabled" : "disabled";
+  LOG.info("Default name service: {}, {} to read or write",
+  this.defaultNameService, enable);
 }
   }
 
@@ -406,13 +417,17 @@ public class MountTableResolver
* @param path Path to check/insert.
* @return New remote location.
*/
-  public PathLocation lookupLocation(final String path) {
+  public PathLocation lookupLocation(final String path) throws IOException {
 PathLocation ret = null;
 MountTable entry = findDeepest(path);
 if (entry != null) {
   ret = buildLocation(path, entry);
 } else {
   // Not found, use default location
+  if (!defaultNSEnable) {
+throw new IOException("Cannot find locations for " + path + ", " +
+"because the default nameservice is disabled to read or write");
+  }
   RemoteLocation remoteLocation =
   new RemoteLo

[1/5] hadoop git commit: HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. Contributed by yanghuafeng.

2018-09-04 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5902c0658 -> 2e744bd31
  refs/heads/branch-2.9 6ed97eba2 -> 2e869cbce
  refs/heads/branch-3.0 5514f02a7 -> 54d3189cf
  refs/heads/branch-3.1 142d878c9 -> a26565960
  refs/heads/trunk 6bbd24901 -> 54f204459


HDFS-13857. RBF: Choose to enable the default nameservice to read/write files. 
Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54f20445
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54f20445
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54f20445

Branch: refs/heads/trunk
Commit: 54f2044595206455484284b43e5976c8a1982aaf
Parents: 6bbd249
Author: Inigo Goiri 
Authored: Tue Sep 4 12:17:17 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Sep 4 12:17:17 2018 -0700

--
 .../federation/resolver/MountTableResolver.java | 39 +++-
 .../server/federation/router/RBFConfigKeys.java |  4 ++
 .../federation/router/RouterRpcServer.java  |  2 +-
 .../src/main/resources/hdfs-rbf-default.xml |  8 
 .../resolver/TestMountTableResolver.java| 26 +
 5 files changed, 76 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54f20445/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index d45441f..bdd75c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.federation.resolver;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE;
@@ -95,6 +97,8 @@ public class MountTableResolver
 
  /** Default nameservice when no mount matches the path. */
   private String defaultNameService = "";
+  /** Whether to use the default nameservice to read and write files. */
+  private boolean defaultNSEnable = true;
 
   /** Synchronization for both the tree and the cache. */
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
@@ -163,6 +167,10 @@ public class MountTableResolver
 DFS_ROUTER_DEFAULT_NAMESERVICE,
 DFSUtil.getNamenodeNameServiceId(conf));
 
+this.defaultNSEnable = conf.getBoolean(
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE,
+DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT);
+
 if (defaultNameService == null) {
   LOG.warn(
   "{} and {} is not set. Fallback to {} as the default name service.",
@@ -176,9 +184,12 @@ public class MountTableResolver
 }
 
 if (this.defaultNameService.equals("")) {
+  this.defaultNSEnable = false;
   LOG.warn("Default name service is not set.");
 } else {
-  LOG.info("Default name service: {}", this.defaultNameService);
+  String enable = this.defaultNSEnable ? "enabled" : "disabled";
+  LOG.info("Default name service: {}, {} to read or write",
+  this.defaultNameService, enable);
 }
   }
 
@@ -406,13 +417,17 @@ public class MountTableResolver
* @param path Path to check/insert.
* @return New remote location.
*/
-  public PathLocation lookupLocation(final String path) {
+  public PathLocation lookupLocation(final String path) throws IOException {
 PathLocation ret = null;
 MountTable entry = findDeepest(path);
 if (entry != null) {
   ret = buildLocation(path, entry);
 } else {
   // Not found, use default location
+  if (!defaultNSEnable) {
+throw new IOException("C
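
Taken together, the change is small: a boolean switch is read next to the default nameservice id, it is forced off when no default nameservice is configured, and lookupLocation() now throws instead of silently falling back when the switch is off. A condensed sketch of that flow; the configuration key strings below are assumptions inferred from the RBFConfigKeys constant names in the diff, not spelled out by the patch itself:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    public class DefaultNameserviceSketch {
      private final boolean defaultNSEnable;
      private final String defaultNameService;

      public DefaultNameserviceSketch(Configuration conf) {
        // Key strings assumed from DFS_ROUTER_DEFAULT_NAMESERVICE[_ENABLE].
        this.defaultNameService =
            conf.get("dfs.federation.router.default.nameserviceId", "");
        this.defaultNSEnable = !defaultNameService.isEmpty()
            && conf.getBoolean(
                "dfs.federation.router.default.nameservice.enable", true);
      }

      // Stand-in for lookupLocation(): fall back to the default nameservice
      // only when the fallback is enabled, otherwise fail fast.
      public String resolve(String path, String mountTableMatch)
          throws IOException {
        if (mountTableMatch != null) {
          return mountTableMatch;
        }
        if (!defaultNSEnable) {
          throw new IOException("Cannot find locations for " + path + ", "
              + "because the default nameservice is disabled to read or write");
        }
        return defaultNameService;
      }
    }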

hadoop git commit: HADOOP-10219. ipc.Client.setupIOstreams() needs to check for ClientCache.stopClient requested shutdowns. Contributed by Kihwal Lee and Lukas Majercak.

2018-09-04 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 9cf35d99b -> 142d878c9


HADOOP-10219. ipc.Client.setupIOstreams() needs to check for 
ClientCache.stopClient requested shutdowns.
Contributed by Kihwal Lee and Lukas Majercak.

(cherry picked from commit 9e96ac666d783376a8cdea9c3cc84098c5bdcb56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/142d878c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/142d878c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/142d878c

Branch: refs/heads/branch-3.1
Commit: 142d878c902da6bc02d0c21cdd5ca8674da7f5c8
Parents: 9cf35d9
Author: Steve Loughran 
Authored: Tue Sep 4 16:46:12 2018 +0100
Committer: Inigo Goiri 
Committed: Tue Sep 4 12:00:31 2018 -0700

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 14 ++
 .../java/org/apache/hadoop/ipc/TestIPC.java | 45 
 2 files changed, 59 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/142d878c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index e147048..07a2f13 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -70,6 +70,7 @@ import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
 import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
@@ -439,6 +440,8 @@ public class Client implements AutoCloseable {
 
 private final Object sendRpcRequestLock = new Object();
 
+private AtomicReference<Thread> connectingThread = new AtomicReference<>();
+
 public Connection(ConnectionId remoteId, int serviceClass) throws 
IOException {
   this.remoteId = remoteId;
   this.server = remoteId.getAddress();
@@ -777,6 +780,7 @@ public class Client implements AutoCloseable {
 }
   }
   try {
+connectingThread.set(Thread.currentThread());
 if (LOG.isDebugEnabled()) {
   LOG.debug("Connecting to "+server);
 }
@@ -862,6 +866,8 @@ public class Client implements AutoCloseable {
   markClosed(new IOException("Couldn't set up IO streams: " + t, t));
 }
 close();
+  } finally {
+connectingThread.set(null);
   }
 }
 
@@ -1215,6 +1221,13 @@ public class Client implements AutoCloseable {
 notifyAll();
   }
 }
+
+private void interruptConnectingThread() {
+  Thread connThread = connectingThread.get();
+  if (connThread != null) {
+connThread.interrupt();
+  }
+}
 
 /** Close the connection. */
 private synchronized void close() {
@@ -1321,6 +1334,7 @@ public class Client implements AutoCloseable {
 // wake up all connections
 for (Connection conn : connections.values()) {
   conn.interrupt();
+  conn.interruptConnectingThread();
 }
 
 // wait until all connections are closed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/142d878c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index a4577f2..cdbaea4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -1398,6 +1399,50 @@ public class TestIPC {
 assertEquals(Client.getTimeout(config), -1);
   }
 
+  @Test(timeout=60000)
+  public void testSetupConnectionShouldNotBlockShutdown() throws Exception {
+// Start server
+SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+Server server = new TestServer(1, true);
+final InetSocketAddress a
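
The fix itself is a small, reusable pattern: publish the thread that is blocked inside connection setup through an AtomicReference so a shutdown path can interrupt it, and clear the reference in a finally block so later shutdowns cannot interrupt unrelated work. A stripped-down sketch (class and method names are illustrative stand-ins, not the Client internals):

    import java.util.concurrent.atomic.AtomicReference;

    public class ConnectingThreadSketch {
      private final AtomicReference<Thread> connectingThread =
          new AtomicReference<>();

      // Stands in for Connection.setupConnection()/setupIOstreams().
      void setupConnection() {
        try {
          connectingThread.set(Thread.currentThread());
          Thread.sleep(60_000); // placeholder for a blocking connect/handshake
        } catch (InterruptedException e) {
          // A stopClient()-style shutdown interrupted us; give up cleanly.
          Thread.currentThread().interrupt();
        } finally {
          connectingThread.set(null);
        }
      }

      // Stands in for Client.stop() waking a connection stuck in setup.
      void interruptConnectingThread() {
        Thread t = connectingThread.get();
        if (t != null) {
          t.interrupt();
        }
      }
    }

The finally block matters: once setup finishes or fails, the reference is cleared so a later shutdown cannot interrupt the thread while it is doing unrelated work.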

[1/2] hadoop git commit: HADOOP-10219. ipc.Client.setupIOstreams() needs to check for ClientCache.stopClient requested shutdowns. Contributed by Kihwal Lee and Lukas Majercak.

2018-09-04 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4e342f603 -> 5902c0658
  refs/heads/branch-2.9 809faede9 -> 6ed97eba2


HADOOP-10219. ipc.Client.setupIOstreams() needs to check for 
ClientCache.stopClient requested shutdowns.
Contributed by Kihwal Lee and Lukas Majercak.

(cherry picked from commit 9e96ac666d783376a8cdea9c3cc84098c5bdcb56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5902c065
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5902c065
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5902c065

Branch: refs/heads/branch-2
Commit: 5902c0658eb7ad7092c2bdeb10c2296015c84c08
Parents: 4e342f6
Author: Steve Loughran 
Authored: Tue Sep 4 16:46:12 2018 +0100
Committer: Inigo Goiri 
Committed: Tue Sep 4 11:58:56 2018 -0700

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 14 ++
 .../java/org/apache/hadoop/ipc/TestIPC.java | 45 
 2 files changed, 59 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5902c065/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 533b6ca..2636adb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -70,6 +70,7 @@ import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
 import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
@@ -440,6 +441,8 @@ public class Client implements AutoCloseable {
 
 private final Object sendRpcRequestLock = new Object();
 
+private AtomicReference<Thread> connectingThread = new AtomicReference<>();
+
 public Connection(ConnectionId remoteId, int serviceClass) throws 
IOException {
   this.remoteId = remoteId;
   this.server = remoteId.getAddress();
@@ -777,6 +780,7 @@ public class Client implements AutoCloseable {
 }
   }
   try {
+connectingThread.set(Thread.currentThread());
 if (LOG.isDebugEnabled()) {
   LOG.debug("Connecting to "+server);
 }
@@ -862,6 +866,8 @@ public class Client implements AutoCloseable {
   markClosed(new IOException("Couldn't set up IO streams: " + t, t));
 }
 close();
+  } finally {
+connectingThread.set(null);
   }
 }
 
@@ -1215,6 +1221,13 @@ public class Client implements AutoCloseable {
 notifyAll();
   }
 }
+
+private void interruptConnectingThread() {
+  Thread connThread = connectingThread.get();
+  if (connThread != null) {
+connThread.interrupt();
+  }
+}
 
 /** Close the connection. */
 private synchronized void close() {
@@ -1317,6 +1330,7 @@ public class Client implements AutoCloseable {
 // wake up all connections
 for (Connection conn : connections.values()) {
   conn.interrupt();
+  conn.interruptConnectingThread();
 }
 
 // wait until all connections are closed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5902c065/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index a6c57fe..95e76f7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -1398,6 +1399,50 @@ public class TestIPC {
 assertEquals(Client.getTimeout(config), -1);
   }
 
+  @Test(timeout=60000)
+  public void testSetupConnectionShouldNotBlockShutdown() throws Exception {
+// Start server
+SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+Server server = new TestServ

[2/2] hadoop git commit: HADOOP-10219. ipc.Client.setupIOstreams() needs to check for ClientCache.stopClient requested shutdowns. Contributed by Kihwal Lee and Lukas Majercak.

2018-09-04 Thread inigoiri
HADOOP-10219. ipc.Client.setupIOstreams() needs to check for 
ClientCache.stopClient requested shutdowns.
Contributed by Kihwal Lee and Lukas Majercak.

(cherry picked from commit 9e96ac666d783376a8cdea9c3cc84098c5bdcb56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ed97eba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ed97eba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ed97eba

Branch: refs/heads/branch-2.9
Commit: 6ed97eba2237b16295e873beb99379e12f116a6e
Parents: 809faed
Author: Steve Loughran 
Authored: Tue Sep 4 16:46:12 2018 +0100
Committer: Inigo Goiri 
Committed: Tue Sep 4 11:59:23 2018 -0700

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 14 ++
 .../java/org/apache/hadoop/ipc/TestIPC.java | 45 
 2 files changed, 59 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed97eba/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 533b6ca..2636adb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -70,6 +70,7 @@ import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
 import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
@@ -440,6 +441,8 @@ public class Client implements AutoCloseable {
 
 private final Object sendRpcRequestLock = new Object();
 
+private AtomicReference<Thread> connectingThread = new AtomicReference<>();
+
 public Connection(ConnectionId remoteId, int serviceClass) throws 
IOException {
   this.remoteId = remoteId;
   this.server = remoteId.getAddress();
@@ -777,6 +780,7 @@ public class Client implements AutoCloseable {
 }
   }
   try {
+connectingThread.set(Thread.currentThread());
 if (LOG.isDebugEnabled()) {
   LOG.debug("Connecting to "+server);
 }
@@ -862,6 +866,8 @@ public class Client implements AutoCloseable {
   markClosed(new IOException("Couldn't set up IO streams: " + t, t));
 }
 close();
+  } finally {
+connectingThread.set(null);
   }
 }
 
@@ -1215,6 +1221,13 @@ public class Client implements AutoCloseable {
 notifyAll();
   }
 }
+
+private void interruptConnectingThread() {
+  Thread connThread = connectingThread.get();
+  if (connThread != null) {
+connThread.interrupt();
+  }
+}
 
 /** Close the connection. */
 private synchronized void close() {
@@ -1317,6 +1330,7 @@ public class Client implements AutoCloseable {
 // wake up all connections
 for (Connection conn : connections.values()) {
   conn.interrupt();
+  conn.interruptConnectingThread();
 }
 
 // wait until all connections are closed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed97eba/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index a6c57fe..95e76f7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -1398,6 +1399,50 @@ public class TestIPC {
 assertEquals(Client.getTimeout(config), -1);
   }
 
+  @Test(timeout=60000)
+  public void testSetupConnectionShouldNotBlockShutdown() throws Exception {
+// Start server
+SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+Server server = new TestServer(1, true);
+final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
+// Track how many times we retried to

hadoop git commit: HDDS-98. Adding Ozone Manager Audit Log. Contributed by Dinesh Chitlangia.

2018-09-04 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk b9932162e -> 6bbd24901


HDDS-98. Adding Ozone Manager Audit Log. Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bbd2490
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bbd2490
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bbd2490

Branch: refs/heads/trunk
Commit: 6bbd2490111e0c90a4392a09f3af4a11a80d579c
Parents: b993216
Author: Nanda kumar 
Authored: Wed Sep 5 00:11:07 2018 +0530
Committer: Nanda kumar 
Committed: Wed Sep 5 00:11:07 2018 +0530

--
 .../src/main/compose/ozone/docker-config|  37 
 .../org/apache/hadoop/ozone/OzoneConsts.java|  32 +++
 hadoop-ozone/common/src/main/bin/ozone  |   2 +
 .../src/main/conf/om-audit-log4j2.properties|  86 
 .../org/apache/hadoop/ozone/audit/OMAction.java |  26 ++-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  25 ++-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  21 +-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java  |  22 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  16 +-
 .../apache/hadoop/ozone/om/OzoneManager.java| 213 +++
 10 files changed, 468 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bbd2490/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index a1828a3..21127f8 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,3 +31,40 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bbd2490/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ea4d7f..eb37b79 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
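
The LOG4J2.PROPERTIES_ entries above are, once the docker tooling presumably strips the prefix, an ordinary Log4j2 properties file: an async "OMAudit" logger whose MarkerFilters drop READ events and keep WRITE events. A minimal sketch of driving such a logger from Java; the logger name and markers come from the configuration above, while the event text is illustrative only:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.Marker;
    import org.apache.logging.log4j.MarkerManager;

    public class OmAuditSketch {
      // "OMAudit" matches logger.audit.name in the configuration above.
      private static final Logger AUDIT = LogManager.getLogger("OMAudit");
      private static final Marker READ = MarkerManager.getMarker("READ");
      private static final Marker WRITE = MarkerManager.getMarker("WRITE");

      public static void main(String[] args) {
        // The READ MarkerFilter is onMatch=DENY, so this event is dropped.
        AUDIT.info(READ, "user=hadoop | op=READ_BUCKET | volume=vol1");
        // WRITE events pass the filters and reach the rolling audit file.
        AUDIT.info(WRITE, "user=hadoop | op=CREATE_VOLUME | volume=vol1");
      }
    }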

hadoop git commit: HDDS-75. Support for CopyContainer. Contributed by Elek, Marton.

2018-09-04 Thread nanda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9e96ac666 -> b9932162e


HDDS-75. Support for CopyContainer. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9932162
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9932162
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9932162

Branch: refs/heads/trunk
Commit: b9932162e9eb4acc9c790fc3c4938a5057fc1658
Parents: 9e96ac6
Author: Nanda kumar 
Authored: Tue Sep 4 23:41:50 2018 +0530
Committer: Nanda kumar 
Committed: Tue Sep 4 23:41:50 2018 +0530

--
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   3 +
 .../main/proto/DatanodeContainerProtocol.proto  |  12 +-
 .../common/src/main/resources/ozone-default.xml |  10 +-
 .../container/common/interfaces/Handler.java|  15 +-
 .../statemachine/DatanodeStateMachine.java  |  25 +--
 .../ReplicateContainerCommandHandler.java   | 124 -
 .../transport/server/XceiverServerGrpc.java |  12 +-
 .../container/keyvalue/KeyValueContainer.java   |   3 +-
 .../container/keyvalue/KeyValueHandler.java | 100 ++-
 .../container/ozoneimpl/OzoneContainer.java |   9 +-
 .../replication/ContainerDownloader.java|  40 +
 .../replication/ContainerReplicationSource.java |  49 ++
 .../replication/ContainerStreamingOutput.java   |  45 +
 .../replication/GrpcReplicationClient.java  | 169 ++
 .../replication/GrpcReplicationService.java | 130 ++
 .../OnDemandContainerReplicationSource.java |  76 
 .../replication/SimpleContainerDownloader.java  | 121 +
 .../container/replication/package-info.java |  21 +++
 .../TestReplicateContainerCommandHandler.java   | 146 
 .../commandhandler/package-info.java|  22 +++
 .../container/TestContainerReplication.java | 175 +++
 .../TestReplicateContainerHandler.java  |  70 
 .../container/ozoneimpl/TestOzoneContainer.java |   4 +-
 23 files changed, 1244 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9932162/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 8f53da5..0f2b108 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -265,6 +265,9 @@ public final class OzoneConfigKeys {
   public static final long
  HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEAFULT = 10000L;
 
+  public static final String OZONE_CONTAINER_COPY_WORKDIR =
+  "hdds.datanode.replication.work.dir";
+
   /**
* There is no need to instantiate this class.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9932162/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 930f314..ba0d2d4 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -430,16 +430,22 @@ message CopyContainerRequestProto {
 }
 
 message CopyContainerResponseProto {
-  required string archiveName = 1;
+  required int64 containerID = 1;
   required uint64 readOffset = 2;
   required uint64 len = 3;
   required bool eof = 4;
-  repeated bytes data = 5;
+  required bytes data = 5;
   optional int64 checksum = 6;
 }
 
 service XceiverClientProtocolService {
   // A client-to-datanode RPC to send container commands
   rpc send(stream ContainerCommandRequestProto) returns
-(stream ContainerCommandResponseProto) {}
+(stream ContainerCommandResponseProto) {};
+
+}
+
+service IntraDatanodeProtocolService {
+  // An intra-datanode service to copy raw container data between nodes
+  rpc download (CopyContainerRequestProto) returns (stream 
CopyContainerResponseProto);
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9932162/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index a9fd10b..ca3da41 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources
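
With standard grpc-java code generation, the IntraDatanodeProtocolService above yields a stub whose download() call pushes a stream of CopyContainerResponseProto chunks into a StreamObserver. A sketch of the consuming side under that assumption; the generated message class is taken as given since its package depends on the protoc setup, and the real SimpleContainerDownloader in this patch does considerably more:

    import io.grpc.stub.StreamObserver;
    import java.io.IOException;
    import java.io.OutputStream;

    public class CopyContainerObserverSketch {
      // Assembles streamed chunks into 'out', e.g. a temp file under the
      // hdds.datanode.replication.work.dir key introduced above.
      static StreamObserver<CopyContainerResponseProto> chunkWriter(
          OutputStream out) {
        return new StreamObserver<CopyContainerResponseProto>() {
          @Override
          public void onNext(CopyContainerResponseProto chunk) {
            try {
              // 'data' is a required bytes field; getData() is a ByteString.
              chunk.getData().writeTo(out);
            } catch (IOException e) {
              throw new RuntimeException("Failed to write container chunk", e);
            }
          }

          @Override
          public void onError(Throwable t) {
            // Download failed; the caller should discard the partial file.
          }

          @Override
          public void onCompleted() {
            // 'eof' on the final chunk and this callback both mark the end.
          }
        };
      }
    }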

hadoop git commit: HADOOP-10219. ipc.Client.setupIOstreams() needs to check for ClientCache.stopClient requested shutdowns. Contributed by Kihwal Lee and Lukas Majercak.

2018-09-04 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e5ffb74d -> 9e96ac666


HADOOP-10219. ipc.Client.setupIOstreams() needs to check for 
ClientCache.stopClient requested shutdowns.
Contributed by Kihwal Lee and Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e96ac66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e96ac66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e96ac66

Branch: refs/heads/trunk
Commit: 9e96ac666d783376a8cdea9c3cc84098c5bdcb56
Parents: 6e5ffb7
Author: Steve Loughran 
Authored: Tue Sep 4 16:46:12 2018 +0100
Committer: Steve Loughran 
Committed: Tue Sep 4 16:46:12 2018 +0100

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 14 ++
 .../java/org/apache/hadoop/ipc/TestIPC.java | 45 
 2 files changed, 59 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e96ac66/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index e147048..07a2f13 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -70,6 +70,7 @@ import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
 import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
@@ -439,6 +440,8 @@ public class Client implements AutoCloseable {
 
 private final Object sendRpcRequestLock = new Object();
 
+private AtomicReference<Thread> connectingThread = new AtomicReference<>();
+
 public Connection(ConnectionId remoteId, int serviceClass) throws 
IOException {
   this.remoteId = remoteId;
   this.server = remoteId.getAddress();
@@ -777,6 +780,7 @@ public class Client implements AutoCloseable {
 }
   }
   try {
+connectingThread.set(Thread.currentThread());
 if (LOG.isDebugEnabled()) {
   LOG.debug("Connecting to "+server);
 }
@@ -862,6 +866,8 @@ public class Client implements AutoCloseable {
   markClosed(new IOException("Couldn't set up IO streams: " + t, t));
 }
 close();
+  } finally {
+connectingThread.set(null);
   }
 }
 
@@ -1215,6 +1221,13 @@ public class Client implements AutoCloseable {
 notifyAll();
   }
 }
+
+private void interruptConnectingThread() {
+  Thread connThread = connectingThread.get();
+  if (connThread != null) {
+connThread.interrupt();
+  }
+}
 
 /** Close the connection. */
 private synchronized void close() {
@@ -1321,6 +1334,7 @@ public class Client implements AutoCloseable {
 // wake up all connections
 for (Connection conn : connections.values()) {
   conn.interrupt();
+  conn.interruptConnectingThread();
 }
 
 // wait until all connections are closed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e96ac66/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 84b82e2..19314c1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -1398,6 +1399,50 @@ public class TestIPC {
 assertEquals(Client.getTimeout(config), -1);
   }
 
+  @Test(timeout=60000)
+  public void testSetupConnectionShouldNotBlockShutdown() throws Exception {
+// Start server
+SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+Server server = new TestServer(1, true);
+final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
+// Track how many times we r
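
The test is cut off above, but its trick is worth spelling out: make every connect attempt hang indefinitely, then assert that stopping the client still returns promptly because the hung thread gets interrupted. The hang is easy to stage with Mockito; a sketch of just that piece (the sleep and structure in the real TestIPC differ):

    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.net.Socket;
    import javax.net.SocketFactory;

    public class HangingConnectSketch {
      static SocketFactory hangingFactory() throws Exception {
        SocketFactory factory = mock(SocketFactory.class);
        // Every connect through this factory blocks until interrupted,
        // simulating a black-holed server during setupIOstreams().
        doAnswer(invocation -> {
          Thread.sleep(Long.MAX_VALUE);
          return new Socket();
        }).when(factory).createSocket();
        return factory;
      }
    }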

hadoop git commit: HADOOP-15547. WASB: improve listStatus performance. Contributed by Thomas Marquardt.

2018-09-04 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 21aa7f1d8 -> 4e342f603


HADOOP-15547. WASB: improve listStatus performance.
Contributed by Thomas Marquardt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e342f60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e342f60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e342f60

Branch: refs/heads/branch-2
Commit: 4e342f603f17e621c7650c41a7d4298dbdc2cab4
Parents: 21aa7f1
Author: Steve Loughran 
Authored: Tue Sep 4 16:32:22 2018 +0100
Committer: Steve Loughran 
Committed: Tue Sep 4 16:32:22 2018 +0100

--
 .../dev-support/findbugs-exclude.xml|  10 +
 hadoop-tools/hadoop-azure/pom.xml   |  12 +
 .../fs/azure/AzureNativeFileSystemStore.java| 182 -
 .../apache/hadoop/fs/azure/FileMetadata.java|  77 ++--
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 376 ---
 .../hadoop/fs/azure/NativeFileSystemStore.java  |  15 +-
 .../apache/hadoop/fs/azure/PartialListing.java  |  61 ---
 .../hadoop/fs/azure/ITestListPerformance.java   | 196 ++
 ...estNativeAzureFileSystemConcurrencyLive.java |   4 +-
 .../fs/azure/NativeAzureFileSystemBaseTest.java |   2 +-
 10 files changed, 517 insertions(+), 418 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e342f60/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
index cde1734..38de35e 100644
--- a/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-azure/dev-support/findbugs-exclude.xml
@@ -47,4 +47,14 @@


  
+
+
+
+
+
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e342f60/hadoop-tools/hadoop-azure/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure/pom.xml 
b/hadoop-tools/hadoop-azure/pom.xml
index c9325ff..f4b0691 100644
--- a/hadoop-tools/hadoop-azure/pom.xml
+++ b/hadoop-tools/hadoop-azure/pom.xml
@@ -43,6 +43,8 @@
 
unset
 
 7200
+
10
+
1000
   
 
   
@@ -275,6 +277,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
 **/Test*.java
@@ -303,6 +307,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
 **/TestRollingWindowAverage*.java
@@ -344,6 +350,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
   
@@ -389,6 +397,8 @@
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.huge.partitionsize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
 
**/ITestFileSystemOperationsExceptionHandlingMultiThreaded.java
@@ -431,6 +441,8 @@
 
${fs.azure.scale.test.enabled}
 
${fs.azure.scale.test.huge.filesize}
 
${fs.azure.scale.test.timeout}
+
${fs.azure.scale.test.list.performance.threads}
+
${fs.azure.scale.test.list.performance.files}
   
   
${fs.azure.scale.test.timeout}
   false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e342f60/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs
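
The pom changes above forward two new knobs into the test JVMs as system properties. A sketch of how a test can consume them; the property names and the 10/1000 defaults are read off the pom diff, the helper itself is illustrative:

    public class ListPerformanceKnobsSketch {
      static int threads() {
        return Integer.getInteger(
            "fs.azure.scale.test.list.performance.threads", 10);
      }

      static int files() {
        return Integer.getInteger(
            "fs.azure.scale.test.list.performance.files", 1000);
      }

      public static void main(String[] args) {
        // Values arrive via -D flags or surefire/failsafe systemPropertyVariables.
        System.out.println("listStatus scale test: threads=" + threads()
            + ", files=" + files());
      }
    }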

[45/50] [abbrv] hadoop git commit: HDDS-263. Add retries in Ozone Client to handle BlockNotCommitted Exception. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-263. Add retries in Ozone Client to handle BlockNotCommitted Exception. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/873ef8ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/873ef8ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/873ef8ae

Branch: refs/heads/HDFS-12090
Commit: 873ef8ae81321325889c9d3a6939163e98fbf5bb
Parents: ff036e4
Author: Mukul Kumar Singh 
Authored: Mon Sep 3 12:26:34 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Mon Sep 3 12:26:34 2018 +0530

--
 .../helpers/BlockNotCommittedException.java | 36 
 .../scm/storage/ContainerProtocolCalls.java |  5 ++
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  8 ++
 .../common/src/main/resources/ozone-default.xml | 16 
 .../hadoop/ozone/client/OzoneClientUtils.java   | 28 ++
 .../ozone/client/io/ChunkGroupOutputStream.java | 89 +++
 .../hadoop/ozone/client/rpc/RpcClient.java  |  5 ++
 .../rpc/TestCloseContainerHandlingByClient.java | 91 +---
 .../web/storage/DistributedStorageHandler.java  |  5 ++
 9 files changed, 254 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/873ef8ae/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
new file mode 100644
index 000..86f5a66
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+
+/**
+ * Exceptions thrown when a block is yet to be committed on the datanode.
+ */
+public class BlockNotCommittedException extends StorageContainerException {
+
+  /**
+   * Constructs an {@code IOException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the {@link #getMessage()} method)
+   */
+  public BlockNotCommittedException(String message) {
+super(message, ContainerProtos.Result.BLOCK_NOT_COMMITTED);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/873ef8ae/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 1f2fafb..1d6a89d 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
+import org.apache.hadoop.hdds.scm.container.common.helpers
+.BlockNotCommittedException;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -420,6 +422,9 @@ public final class ContainerProtocolCalls  {
   ) throws StorageContainerException {
 if (response.getResult() == ContainerProtos.Result.SUCCESS) {
   return;
+} else if (response.getResult()
+== ContainerProtos.Result.BLOCK_NOT_COMMITTED) {
+  throw new BlockNotCommittedException(response.getMessage());
 }
 throw new StorageContainerException(
 response.getMessage(), response.get
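
Mapping BLOCK_NOT_COMMITTED to its own exception type, as above, is what lets the client retry exactly that case and nothing else. A bounded-retry sketch; the retry count and interval here are illustrative, while the patch wires the real values through the new ozone-default.xml keys:

    import java.io.IOException;
    import org.apache.hadoop.hdds.scm.container.common.helpers.BlockNotCommittedException;

    public class CommitRetrySketch {
      interface Call {
        void run() throws IOException;
      }

      static void runWithRetry(Call call, int maxRetries, long intervalMs)
          throws IOException, InterruptedException {
        for (int attempt = 0; ; attempt++) {
          try {
            call.run();
            return;
          } catch (BlockNotCommittedException e) {
            // The datanode has not committed the block yet; retry until the
            // budget is exhausted, then surface the failure to the caller.
            if (attempt >= maxRetries) {
              throw e;
            }
            Thread.sleep(intervalMs);
          }
        }
      }
    }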

[42/50] [abbrv] hadoop git commit: YARN-8535. Fix DistributedShell unit tests. Contributed by Abhishek Modi.

2018-09-04 Thread ehiggs
YARN-8535. Fix DistributedShell unit tests. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eed8415d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eed8415d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eed8415d

Branch: refs/heads/HDFS-12090
Commit: eed8415dc18fa7415ebd105350bd0532b3b1b6bb
Parents: 6edf3d2
Author: bibinchundatt 
Authored: Sun Sep 2 13:35:52 2018 +0530
Committer: bibinchundatt 
Committed: Sun Sep 2 13:35:52 2018 +0530

--
 .../yarn/applications/distributedshell/ApplicationMaster.java| 2 +-
 .../yarn/applications/distributedshell/TestDistributedShell.java | 1 +
 .../test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java | 4 +++-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed8415d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 76fa38f..ecf07b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -944,7 +944,7 @@ public class ApplicationMaster {
 
 // When the application completes, it should send a finish application
 // signal to the RM
-LOG.info("Application completed. Signalling finish to RM");
+LOG.info("Application completed. Signalling finished to RM");
 
 FinalApplicationStatus appStatus;
 boolean success = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed8415d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 3a98a22..c7e1cf1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -624,6 +624,7 @@ public class TestDistributedShell {
   String entityfileName) {
 String outputDirPathForEntity =
 basePath + File.separator + entityType + File.separator;
+LOG.info(outputDirPathForEntity);
 File outputDirForEntity = new File(outputDirPathForEntity);
 Assert.assertTrue(outputDirForEntity.isDirectory());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eed8415d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 0395138..fa69f18 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -577,7 +577,9 @@ public class MiniYARNCluster extends CompositeService {
   config.set(YarnConfiguration.NM_

[43/50] [abbrv] hadoop git commit: HDDS-357. Use DBStore and TableStore for OzoneManager non-background service. Contributed by Nandakumar.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
--
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 21d2411..151fddf 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -19,77 +19,178 @@ package org.apache.hadoop.ozone.om;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
-
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
+import org.apache.hadoop.utils.db.DBStore;
+import org.apache.hadoop.utils.db.DBStoreBuilder;
+import org.apache.hadoop.utils.db.Table;
+import org.apache.hadoop.utils.db.TableIterator;
+import org.eclipse.jetty.util.StringUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.ArrayList;
 import java.util.Map;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX;
-import static org.apache.hadoop.ozone.om.OMConfigKeys
-.OZONE_OM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys
-.OZONE_OM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 
 /**
  * Ozone metadata manager interface.
  */
 public class OmMetadataManagerImpl implements OMMetadataManager {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(OmMetadataManagerImpl.class);
+
+  /**
+   * OM RocksDB Structure.
+   * 
+   * OM DB stores metadata as KV pairs in different column families.
+   * 
+   * OM DB Schema:
+   * |---|
+   * |  Column Family |VALUE |
+   * |---|
+   * | userTable  | user->VolumeList |
+   * |---|
+   * | volumeTable| /volume
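
For orientation, the column families in that (truncated) schema comment are reached through the DBStore/Table abstraction now imported above. Only the class names come from the patch; every builder and accessor method below is an assumption for illustration, not a verified signature:

    // Hypothetical usage sketch: method names are assumed, not verified.
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.utils.db.DBStore;
    import org.apache.hadoop.utils.db.DBStoreBuilder;
    import org.apache.hadoop.utils.db.Table;

    public class OmDbSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        DBStore store = DBStoreBuilder.newBuilder(conf) // assumed factory
            .setName("om.db")                           // OM_DB_NAME in the patch
            .addTable("userTable")                      // one column family each
            .addTable("volumeTable")
            .build();
        Table userTable = store.getTable("userTable");  // assumed accessor
        // user -> VolumeList, the first mapping in the schema comment.
        byte[] volumes = userTable.get("user1".getBytes(StandardCharsets.UTF_8));
        store.close();
      }
    }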

[46/50] [abbrv] hadoop git commit: HDFS-13867. RBF: Add validation for max arguments for Router admin ls, clrQuota, setQuota, rm and nameservice commands. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13867. RBF: Add validation for max arguments for Router admin ls, 
clrQuota, setQuota, rm and nameservice commands. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/780df903
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/780df903
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/780df903

Branch: refs/heads/HDFS-12090
Commit: 780df9034f265a8e602856b34cc21d9be02f5c48
Parents: 873ef8a
Author: Vinayakumar B 
Authored: Mon Sep 3 14:28:31 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Sep 3 14:28:31 2018 +0530

--
 .../hdfs/tools/federation/RouterAdmin.java  | 45 ++--
 .../federation/router/TestRouterAdminCLI.java   | 35 +++
 2 files changed, 76 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/780df903/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 46be373..ef8d7c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -146,6 +146,43 @@ public class RouterAdmin extends Configured implements 
Tool {
 return getUsage(null);
   }
 
+  /**
+   * Usage: validates the maximum number of arguments for a command.
+   * @param arg List of command line parameters.
+   */
+  private void validateMax(String[] arg) {
+if (arg[0].equals("-rm")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed");
+  }
+} else if (arg[0].equals("-ls")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed");
+  }
+} else if (arg[0].equals("-clrQuota")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed");
+  }
+} else if (arg[0].equals("-safemode")) {
+  if (arg.length > 2) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=1 argument allowed only");
+  }
+} else if (arg[0].equals("-nameservice")) {
+  if (arg.length > 3) {
+throw new IllegalArgumentException(
+"Too many arguments, Max=2 arguments allowed");
+  }
+} else if (arg[0].equals("-getDisabledNameservices")) {
+  if (arg.length > 1) {
+throw new IllegalArgumentException("No arguments allowed");
+  }
+}
+  }
+
   @Override
   public int run(String[] argv) throws Exception {
 if (argv.length < 1) {
@@ -222,6 +259,7 @@ public class RouterAdmin extends Configured implements Tool 
{
 Exception debugException = null;
 exitCode = 0;
 try {
+  validateMax(argv);
   if ("-add".equals(cmd)) {
 if (addMount(argv, i)) {
   System.out.println("Successfully added mount point " + argv[i]);
@@ -251,10 +289,6 @@ public class RouterAdmin extends Configured implements 
Tool {
   "Successfully clear quota for mount point " + argv[i]);
 }
   } else if ("-safemode".equals(cmd)) {
-if (argv.length > 2) {
-  throw new IllegalArgumentException(
-  "Too many arguments, Max=1 argument allowed only");
-}
 manageSafeMode(argv[i]);
   } else if ("-nameservice".equals(cmd)) {
 String subcmd = argv[i];
@@ -641,6 +675,9 @@ public class RouterAdmin extends Configured implements Tool 
{
   throw new IllegalArgumentException(
   "Cannot parse ssQuota: " + parameters[i]);
 }
+  } else {
+throw new IllegalArgumentException(
+"Invalid argument : " + parameters[i]);
   }
 
   i++;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/780df903/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 0c7321f..fa29cd9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federa
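
validateMax() repeats the same length check per command, so the limits read naturally as a table. A sketch of an equivalent table-driven shape (an alternative formulation, not what the patch commits):

    import java.util.HashMap;
    import java.util.Map;

    public class MaxArgsSketch {
      // Command -> maximum argument count after the command itself,
      // copied from the checks in validateMax() above.
      private static final Map<String, Integer> MAX_ARGS = new HashMap<>();
      static {
        MAX_ARGS.put("-rm", 1);
        MAX_ARGS.put("-ls", 1);
        MAX_ARGS.put("-clrQuota", 1);
        MAX_ARGS.put("-safemode", 1);
        MAX_ARGS.put("-nameservice", 2);
        MAX_ARGS.put("-getDisabledNameservices", 0);
      }

      static void validateMax(String[] arg) {
        Integer max = MAX_ARGS.get(arg[0]);
        if (max != null && arg.length > max + 1) {
          throw new IllegalArgumentException(
              "Too many arguments, Max=" + max + " argument(s) allowed");
        }
      }
    }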

[47/50] [abbrv] hadoop git commit: HDFS-13774. EC: 'hdfs ec -getPolicy' is not retrieving policy details when the special REPLICATION policy is set on the directory. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13774. EC: 'hdfs ec -getPolicy' is not retrieving policy details when the
special REPLICATION policy is set on the directory. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3801436e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3801436e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3801436e

Branch: refs/heads/HDFS-12090
Commit: 3801436e49822c724c8f4e393e6e5abbd0d2573a
Parents: 780df90
Author: Vinayakumar B 
Authored: Mon Sep 3 14:37:57 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Sep 3 14:37:57 2018 +0530

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md   | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3801436e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 2e8cbbd..67e6b75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -65,11 +65,11 @@ Architecture
 
   2. _The size of a striping cell._ This determines the granularity of 
striped reads and writes, including buffer sizes and encoding work.
 
-Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell 
size*. Currently, six built-in policies are supported: `RS-3-2-1024k`, 
`RS-6-3-1024k`, `RS-10-4-1024k`, `RS-LEGACY-6-3-1024k`, `XOR-2-1-1024k` and 
`REPLICATION`.
+Policies are named *codec*-*num data blocks*-*num parity blocks*-*cell 
size*. Currently, five built-in policies are supported: `RS-3-2-1024k`, 
`RS-6-3-1024k`, `RS-10-4-1024k`, `RS-LEGACY-6-3-1024k`, `XOR-2-1-1024k`.
 
-`REPLICATION` is a special policy. It can only be set on directory, to 
force the directory to adopt 3x replication scheme, instead of inheriting its 
ancestor's erasure coding policy. This policy makes it possible to interleave 
3x replication scheme directory with erasure coding directory.
+The default `REPLICATION` scheme is also supported. It can only be set on 
directory, to force the directory to adopt 3x replication scheme, instead of 
inheriting its ancestor's erasure coding policy. This policy makes it possible 
to interleave 3x replication scheme directory with erasure coding directory.
 
-`REPLICATION` policy is always enabled. For other built-in policies, they 
are disabled by default.
+`REPLICATION` is always enabled. Out of all the EC policies, RS(6,3) is 
enabled by default.
 
 Similar to HDFS storage policies, erasure coding policies are set on a 
directory. When a file is created, it inherits the EC policy of its nearest 
ancestor directory.
 
@@ -184,7 +184,7 @@ Below are the details about each command.
   This parameter can be omitted if a 
'dfs.namenode.ec.system.default.policy' configuration is set.
   The EC policy of the path will be set with the default value in 
configuration.
 
-  `-replicate` apply the special `REPLICATION` policy on the directory, 
force the directory to adopt 3x replication scheme.
+  `-replicate` apply the default `REPLICATION` scheme on the directory, 
force the directory to adopt 3x replication scheme.
 
   `-replicate` and `-policy ` are optional arguments. They 
cannot be specified at the same time.
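
The set/get operations documented above can also be exercised programmatically.
A minimal sketch, assuming a running HDFS cluster reachable through the default
configuration; the directory path and the choice of RS-6-3-1024k are
illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
      Path dir = new Path("/ec-data");
      dfs.mkdirs(dir);
      // Files created under /ec-data will inherit this policy.
      dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println("Policy on " + dir + ": "
          + (policy == null ? "replication (none set)" : policy.getName()));
    }
  }
}

Setting the policy on a directory rather than per file matches the inheritance
model described above.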
 





[34/50] [abbrv] hadoop git commit: HDFS-13027. Handle possible NPEs due to deleted blocks in race condition. Contributed by Vinayakumar B.

2018-09-04 Thread ehiggs
HDFS-13027. Handle possible NPEs due to deleted blocks in race condition. 
Contributed by Vinayakumar B.

(cherry picked from commit 65977e5d8124be2bc208af25beed934933f170b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c36d69a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c36d69a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c36d69a7

Branch: refs/heads/HDFS-12090
Commit: c36d69a7b30927eaea16335e06cfcc247accde35
Parents: f2c2a68
Author: Vinayakumar B 
Authored: Wed Aug 29 22:40:13 2018 +0530
Committer: Vinayakumar B 
Committed: Thu Aug 30 22:15:51 2018 +0530

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java| 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java| 5 -
 4 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index 43f4f47..d160f61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -52,7 +52,7 @@ public abstract class BlockInfo extends Block
   /**
* Block collection ID.
*/
-  private long bcId;
+  private volatile long bcId;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface. */
   private LightWeightGSet.LinkedElement nextLinkedElement;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 17f6f6e..675221a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4171,6 +4171,10 @@ public class BlockManager implements BlockStatsMXBean {
 int numExtraRedundancy = 0;
 while(it.hasNext()) {
   final BlockInfo block = it.next();
+  if (block.isDeleted()) {
+//Orphan block, will be handled eventually, skip
+continue;
+  }
   int expectedReplication = this.getExpectedRedundancyNum(block);
   NumberReplicas num = countNodes(block);
   if (shouldProcessExtraRedundancy(num, expectedReplication)) {
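
The isDeleted() guard above is the core of the fix: a racing file delete can
orphan a block between the time the iterator is obtained and the time the block
is dereferenced. An illustrative-only sketch of the pattern with stand-in types
(Block here is not the real HDFS class):

import java.util.Iterator;
import java.util.List;

class SkipDeletedExample {
  static class Block {
    volatile boolean deleted;
    boolean isDeleted() { return deleted; }
  }

  static int countLive(List<Block> blocks) {
    int live = 0;
    for (Iterator<Block> it = blocks.iterator(); it.hasNext();) {
      Block b = it.next();
      // Re-check for concurrent deletion before touching per-block state,
      // instead of assuming the view taken at iteration start is current.
      if (b == null || b.isDeleted()) {
        continue;
      }
      live++;
    }
    return live;
  }
}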

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6ba0e0b..74c9f10 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4128,7 +4128,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 while (it.hasNext()) {
   Block b = it.next();
   BlockInfo blockInfo = blockManager.getStoredBlock(b);
-  if (blockInfo == null) {
+  if (blockInfo == null || blockInfo.isDeleted()) {
 LOG.info("Cannot find block info for block " + b);
   } else {
 BlockCollection bc = getBlockCollection(blockInfo);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36d69a7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/

[48/50] [abbrv] hadoop git commit: HDDS-336. Print out container location information for a specific ozone key. Contributed by LiXin Ge.

2018-09-04 Thread ehiggs
HDDS-336. Print out container location information for a specific ozone key. 
Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/211034a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/211034a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/211034a6

Branch: refs/heads/HDFS-12090
Commit: 211034a6c22dd4ebe697481ea4d57b5eb932fa08
Parents: 3801436
Author: Márton Elek 
Authored: Mon Sep 3 13:32:55 2018 +0200
Committer: Márton Elek 
Committed: Mon Sep 3 13:32:55 2018 +0200

--
 .../apache/hadoop/ozone/client/OzoneBucket.java |   4 +-
 .../hadoop/ozone/client/OzoneClientUtils.java   |  29 -
 .../hadoop/ozone/client/OzoneKeyDetails.java|  58 ++
 .../hadoop/ozone/client/OzoneKeyLocation.java   |  82 ++
 .../ozone/client/protocol/ClientProtocol.java   |  10 +-
 .../hadoop/ozone/client/rest/RestClient.java|  27 ++---
 .../hadoop/ozone/client/rpc/RpcClient.java  |  22 ++--
 .../ozone/client/rest/headers/Header.java   |   1 +
 .../client/rest/response/KeyInfoDetails.java| 107 +++
 .../ozone/client/rest/response/KeyLocation.java |  89 +++
 .../ozone/web/response/KeyInfoDetails.java  |  80 ++
 .../hadoop/ozone/web/response/KeyLocation.java  |  82 ++
 .../ozone/client/rest/TestOzoneRestClient.java  |  86 +--
 .../ozone/client/rpc/TestOzoneRpcClient.java| 101 +++--
 .../hadoop/ozone/ozShell/TestOzoneShell.java|   6 +-
 .../hadoop/ozone/web/handlers/KeyHandler.java   |  12 +++
 .../ozone/web/interfaces/StorageHandler.java|  12 +++
 .../web/storage/DistributedStorageHandler.java  |  33 --
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |  10 +-
 19 files changed, 779 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/211034a6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 2f3cff6..97bd682 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -258,10 +258,10 @@ public class OzoneBucket {
   /**
* Returns information about the key.
* @param key Name of the key.
-   * @return OzoneKey Information about the key.
+   * @return OzoneKeyDetails Information about the key.
* @throws IOException
*/
-  public OzoneKey getKey(String key) throws IOException {
+  public OzoneKeyDetails getKey(String key) throws IOException {
 return proxy.getKeyDetails(volumeName, name, key);
   }
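
A hedged usage sketch of the changed API. The client bootstrap calls reflect
the Ozone client API of this period, and the accessor names on OzoneKeyDetails
and OzoneKeyLocation are inferred from the diffstat above, so they should be
checked against the patch; volume, bucket, and key names are illustrative.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneKeyLocation;

public class KeyLocationExample {
  public static void main(String[] args) throws Exception {
    OzoneClient client =
        OzoneClientFactory.getClient(new OzoneConfiguration());
    OzoneBucket bucket =
        client.getObjectStore().getVolume("vol1").getBucket("bucket1");
    // getKey() now returns OzoneKeyDetails rather than a plain OzoneKey.
    OzoneKeyDetails details = bucket.getKey("key1");
    for (OzoneKeyLocation loc : details.getOzoneKeyLocations()) {
      // Assumed accessors; each location exposes container and local IDs.
      System.out.println("containerID=" + loc.getContainerID()
          + " localID=" + loc.getLocalID());
    }
    client.close();
  }
}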
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/211034a6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
index 5d57753..40e4d83 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
@@ -25,10 +25,10 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
-import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.rest.response.VolumeOwner;
+import org.apache.hadoop.ozone.client.rest.response.*;
+
+import java.util.ArrayList;
+import java.util.List;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -112,4 +112,25 @@ public final class OzoneClientUtils {
 exceptionToPolicyMap);
 return retryPolicy;
   }
+  /**
+   * Returns a KeyInfoDetails object constructed using fields of the input
+   * OzoneKeyDetails object.
+   *
+   * @param key OzoneKeyDetails instance from which KeyInfo object needs to
+   *be created.
+   * @return KeyInfoDetails instance
+   */
+  public static KeyInfoDetails asKeyInfoDetails(OzoneKeyDetails key) {
+KeyInfoDetails keyInfo = new KeyInfoDetails();
+keyInfo.setKeyName(key.getName());
+
keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime()));
+keyInfo.setModifiedO

[20/50] [abbrv] hadoop git commit: HDDS-365. Implement flushStateMachineData for containerStateMachine. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-365. Implement flushStateMachineData for containerStateMachine. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2651e2c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2651e2c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2651e2c4

Branch: refs/heads/HDFS-12090
Commit: 2651e2c43d0825912669a87afc256bad9f1ea6ed
Parents: 7ed458b
Author: Mukul Kumar Singh 
Authored: Wed Aug 29 07:57:57 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Wed Aug 29 07:58:30 2018 +0530

--
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java |  2 +-
 .../transport/server/XceiverServerGrpc.java   |  2 +-
 .../server/ratis/ContainerStateMachine.java   | 18 ++
 hadoop-project/pom.xml|  2 +-
 4 files changed, 21 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2651e2c4/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index e2416c2..1622ddb 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -87,7 +87,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 }
 LOG.debug("Connecting to server Port : " + leader.getIpAddress());
 channel = NettyChannelBuilder.forAddress(leader.getIpAddress(), port)
-.usePlaintext(true)
+.usePlaintext()
 .maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
 .build();
 asyncStub = XceiverClientProtocolServiceGrpc.newStub(channel);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2651e2c4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index f4f3f6f..4dc232d 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -77,7 +77,7 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
 datanodeDetails.setPort(
 DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, port));
 server = ((NettyServerBuilder) ServerBuilder.forPort(port))
-.maxMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
+.maxInboundMessageSize(OzoneConfigKeys.DFS_CONTAINER_CHUNK_MAX_SIZE)
 .addService(new GrpcXceiverService(dispatcher))
 .build();
 storageContainer = dispatcher;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2651e2c4/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index ede87f4..68d6d5b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -59,6 +59,7 @@ import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.stream.Collectors;
 
 /** A {@link org.apache.ratis.statemachine.StateMachine} for containers.
  *
@@ -316,6 +317,23 @@ public class ContainerStateMachine extends BaseStateMachine {
 return LogEntryProto.newBuilder().setSmLogEntry(log).build();
   }
 
+  /**
+   * Returns the combined future of all the writeChunks till the given log
+   * index. The Raft log worker will wait for the stateMachineData to complete
+   * flush as well.
+   *
+   * @param index log inde
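
The javadoc above describes flushStateMachineData() as the combined future of
all pending writeChunk operations up to a log index. A small sketch of how that
combination can be expressed; the future map and its name are assumptions for
illustration, not the actual ContainerStateMachine fields.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

class FlushUpToIndexExample {
  private final ConcurrentHashMap<Long, CompletableFuture<Void>>
      writeChunkFutureMap = new ConcurrentHashMap<>();

  CompletableFuture<Void> flushStateMachineData(long index) {
    // Collect every pending write whose log index is at or below the flush
    // point, then complete only when all of them have completed.
    List<CompletableFuture<Void>> pending = writeChunkFutureMap.entrySet()
        .stream()
        .filter(e -> e.getKey() <= index)
        .map(e -> e.getValue())
        .collect(Collectors.toList());
    return CompletableFuture.allOf(
        pending.toArray(new CompletableFuture[0]));
  }
}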

[35/50] [abbrv] hadoop git commit: Revert "HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot enabled" status. Contributed by Siyao Meng."

2018-09-04 Thread ehiggs
Revert "HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct 
"snapshot enabled" status. Contributed by Siyao Meng."

This reverts commit 26c2a97c566969f50eb8e8432009724c51152a98.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aa6c4f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aa6c4f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aa6c4f0

Branch: refs/heads/HDFS-12090
Commit: 8aa6c4f079fd38a3230bc070c2ce837fefbc5301
Parents: c36d69a
Author: Wei-Chiu Chuang 
Authored: Thu Aug 30 11:44:20 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Aug 30 11:44:20 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/web/JsonUtilClient.java |  4 
 .../java/org/apache/hadoop/hdfs/web/TestWebHDFS.java| 12 
 2 files changed, 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa6c4f0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index a685573..9bb1846 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -133,7 +133,6 @@ class JsonUtilClient {
 Boolean aclBit = (Boolean) m.get("aclBit");
 Boolean encBit = (Boolean) m.get("encBit");
 Boolean erasureBit  = (Boolean) m.get("ecBit");
-Boolean snapshotEnabledBit  = (Boolean) m.get("snapshotEnabled");
 EnumSet f =
 EnumSet.noneOf(HdfsFileStatus.Flags.class);
 if (aclBit != null && aclBit) {
@@ -145,9 +144,6 @@ class JsonUtilClient {
 if (erasureBit != null && erasureBit) {
   f.add(HdfsFileStatus.Flags.HAS_EC);
 }
-if (snapshotEnabledBit != null && snapshotEnabledBit) {
-  f.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED);
-}
 
 Map ecPolicyObj = (Map) m.get("ecPolicyObj");
 ErasureCodingPolicy ecPolicy = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa6c4f0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 9152636..cbc428a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -482,9 +482,6 @@ public class TestWebHDFS {
 
   // allow snapshots on /bar using webhdfs
   webHdfs.allowSnapshot(bar);
-  // check if snapshot status is enabled
-  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   webHdfs.createSnapshot(bar, "s1");
   final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
   Assert.assertTrue(webHdfs.exists(s1path));
@@ -494,24 +491,15 @@ public class TestWebHDFS {
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   dfs.deleteSnapshot(bar, "s1");
   dfs.disallowSnapshot(bar);
-  // check if snapshot status is disabled
-  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
 
   // disallow snapshots on /bar using webhdfs
   dfs.allowSnapshot(bar);
-  // check if snapshot status is enabled, again
-  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertEquals(1, snapshottableDirs.length);
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   webHdfs.disallowSnapshot(bar);
-  // check if snapshot status is disabled, again
-  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
-  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
   try {




[15/50] [abbrv] hadoop git commit: HDFS-13837. Enable debug log for LeaseRenewer in TestDistributedFileSystem. Contributed by Shweta.

2018-09-04 Thread ehiggs
HDFS-13837. Enable debug log for LeaseRenewer in TestDistributedFileSystem. 
Contributed by Shweta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33f42efc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33f42efc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33f42efc

Branch: refs/heads/HDFS-12090
Commit: 33f42efc947445b7755da6aad34b5e26b96ad663
Parents: ac515d2
Author: Shweta 
Authored: Tue Aug 28 13:51:04 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 13:56:32 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f42efc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 46323dd..cae0fbf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -100,12 +100,12 @@ import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 public class TestDistributedFileSystem {
   private static final Random RAN = new Random();
@@ -113,7 +113,8 @@ public class TestDistributedFileSystem {
   TestDistributedFileSystem.class);
 
   static {
-GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
+GenericTestUtils.setLogLevel(DFSClient.LOG, Level.TRACE);
+GenericTestUtils.setLogLevel(LeaseRenewer.LOG, Level.DEBUG);
   }
 
   private boolean dualPortTesting = false;





[11/50] [abbrv] hadoop git commit: HDDS-359. RocksDB Profiles support. Contributed by Anu Engineer.

2018-09-04 Thread ehiggs
HDDS-359. RocksDB Profiles support. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c61824a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c61824a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c61824a1

Branch: refs/heads/HDFS-12090
Commit: c61824a18940ef37dc7201717a3115a78bf942d4
Parents: df21e1b
Author: Márton Elek 
Authored: Tue Aug 28 19:22:30 2018 +0200
Committer: Márton Elek 
Committed: Tue Aug 28 19:33:13 2018 +0200

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   6 +
 .../hadoop/utils/db/DBConfigFromFile.java   | 134 +
 .../org/apache/hadoop/utils/db/DBProfile.java   | 120 +++
 .../apache/hadoop/utils/db/DBStoreBuilder.java  | 201 +++
 .../org/apache/hadoop/utils/db/RDBStore.java|  32 +--
 .../org/apache/hadoop/utils/db/TableConfig.java |  93 +
 .../common/src/main/resources/ozone-default.xml |  10 +
 .../hadoop/utils/db/TestDBConfigFromFile.java   | 116 +++
 .../hadoop/utils/db/TestDBStoreBuilder.java | 174 
 .../apache/hadoop/utils/db/TestRDBStore.java|  17 +-
 .../hadoop/utils/db/TestRDBTableStore.java  |  11 +-
 .../common/src/test/resources/test.db.ini   | 145 +
 hadoop-hdds/pom.xml |   1 +
 13 files changed, 1040 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c61824a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index d25af80..8272ed7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdds;
 
+import org.apache.hadoop.utils.db.DBProfile;
+
 /**
  * This class contains constants for configuration keys and default values
  * used in hdds.
@@ -58,4 +60,8 @@ public final class HddsConfigKeys {
   public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY =
   "hdds.datanode.volume.choosing.policy";
 
+  // DB Profiles used by ROCKDB instances.
+  public static final String HDDS_DB_PROFILE = "hdds.db.profile";
+  public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.SSD;
+
 }
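
A brief sketch of how a component might resolve the active profile from these
keys; reading via Configuration.getEnum is an assumed call style for
illustration, not a quote from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.utils.db.DBProfile;

public class DbProfileLookup {
  public static DBProfile activeProfile(Configuration conf) {
    // Falls back to the SSD profile when hdds.db.profile is unset.
    return conf.getEnum(HddsConfigKeys.HDDS_DB_PROFILE,
        HddsConfigKeys.HDDS_DEFAULT_DB_PROFILE);
  }
}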

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c61824a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
new file mode 100644
index 000..753a460
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBConfigFromFile.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db;
+
+import com.google.common.base.Preconditions;
+import org.eclipse.jetty.util.StringUtil;
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.DBOptions;
+import org.rocksdb.Env;
+import org.rocksdb.OptionsUtil;
+import org.rocksdb.RocksDBException;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.List;
+
+/**
+ * A Class that controls the standard config options of RocksDB.
+ * 
+ * Important : Some of the functions in this file are magic functions designed
+ * for the use of OZONE developers only. Due to that this information is
+ * documented in this files only and is *not* intended for end user 
consumption.
+ * Please do not use this information to tune your production environments.
+ * Please remember the SpiderMan principal; with great p
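
For orientation, a hedged sketch of the underlying RocksDB mechanism this class
wraps: loading serialized options from an .ini file via OptionsUtil. The file
path is illustrative and error handling is minimal; the overload shown matches
the RocksDB Java API imported above.

import java.util.ArrayList;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.DBOptions;
import org.rocksdb.Env;
import org.rocksdb.OptionsUtil;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class LoadRocksDbIni {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    DBOptions dbOptions = new DBOptions();
    List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
    // Populates dbOptions and cfDescs from the serialized options file.
    OptionsUtil.loadOptionsFromFile(
        "/etc/ozone/test.db.ini", Env.getDefault(), dbOptions, cfDescs);
    System.out.println("Loaded " + cfDescs.size() + " column families");
  }
}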

[41/50] [abbrv] hadoop git commit: Revert "HDDS-98. Adding Ozone Manager Audit Log."

2018-09-04 Thread ehiggs
Revert "HDDS-98. Adding Ozone Manager Audit Log."

This reverts commit 630b64ec7e963968a5bdcd1d625fc78746950137.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6edf3d2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6edf3d2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6edf3d2e

Branch: refs/heads/HDFS-12090
Commit: 6edf3d2ea3de3629629c21cec3b4435bd71753ba
Parents: 19abaac
Author: Nanda kumar 
Authored: Sun Sep 2 00:18:13 2018 +0530
Committer: Nanda kumar 
Committed: Sun Sep 2 00:18:13 2018 +0530

--
 .../src/main/compose/ozone/docker-config|  37 
 .../org/apache/hadoop/ozone/OzoneConsts.java|  32 ---
 hadoop-ozone/common/src/main/bin/ozone  |   2 -
 .../src/main/conf/om-audit-log4j2.properties|  86 
 .../org/apache/hadoop/ozone/audit/OMAction.java |  25 +--
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  25 +--
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  21 +-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java  |  22 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  16 +-
 .../apache/hadoop/ozone/om/OzoneManager.java| 218 +--
 10 files changed, 18 insertions(+), 466 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6edf3d2e/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index 21127f8..a1828a3 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,40 +31,3 @@ LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd 
HH:mm:ss} %-5p %c{1}:%L - %m%n
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
-
-#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
-LOG4J2.PROPERTIES_monitorInterval=30
-LOG4J2.PROPERTIES_filter=read,write
-LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.read.marker=READ
-LOG4J2.PROPERTIES_filter.read.onMatch=DENY
-LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
-LOG4J2.PROPERTIES_filter.write.marker=WRITE
-LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
-LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
-LOG4J2.PROPERTIES_appenders=console, rolling
-LOG4J2.PROPERTIES_appender.console.type=Console
-LOG4J2.PROPERTIES_appender.console.name=STDOUT
-LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName 
=${sys:hadoop.log.dir}/om-audit-${hostName}.log
-LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{-MM-dd-HH-mm-ss}-%i.log.gz
-LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
-LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
-LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
-LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
-LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
-LOG4J2.PROPERTIES_loggers=audit
-LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
-LOG4J2.PROPERTIES_logger.audit.name=OMAudit
-LOG4J2.PROPERTIES_logger.audit.level=INFO
-LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
-LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
-LOG4J2.PROPERTIES_rootLogger.level=INFO
-LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
-LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6edf3d2e/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 9645c02..15366fb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -184,36 +184,4 @@ public final cla

[49/50] [abbrv] hadoop git commit: HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to backup blocks. Original patch contributed by Ewan Higgs. Followup work and fixes contributed by Virajith

2018-09-04 Thread ehiggs
HDFS-13310. The DatanodeProtocol should have a DNA_BACKUP to backup blocks. 
Original patch contributed by Ewan Higgs. Followup work and fixes contributed 
by Virajith Jalaparthi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8cdd033a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8cdd033a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8cdd033a

Branch: refs/heads/HDFS-12090
Commit: 8cdd033a06693020c620c8057200e2da5b469ffc
Parents: 211034a
Author: Ewan Higgs 
Authored: Mon Jul 23 13:14:04 2018 +0200
Committer: Ewan Higgs 
Committed: Mon Sep 3 14:40:50 2018 +0200

--
 .../BlockSyncTaskExecutionFeedback.java |  67 ++
 .../protocol/SyncTaskExecutionOutcome.java  |  25 +++
 .../protocol/SyncTaskExecutionResult.java   |  46 
 .../DatanodeProtocolClientSideTranslatorPB.java |   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   6 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 208 ++-
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../hdfs/server/datanode/BPServiceActor.java|   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   8 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   8 +-
 .../hdfs/server/protocol/BlockSyncTask.java |  83 
 .../protocol/BulkSyncTaskExecutionFeedback.java |  36 
 .../hdfs/server/protocol/DatanodeProtocol.java  |  20 +-
 .../hdfs/server/protocol/SyncCommand.java   |  39 
 .../src/main/proto/DatanodeProtocol.proto   |  88 +++-
 .../blockmanagement/TestDatanodeManager.java|   2 +-
 .../TestNameNodePrunesMissingStorages.java  |   2 +-
 .../datanode/InternalDataNodeTestUtils.java |   3 +-
 .../server/datanode/TestBPOfferService.java |   5 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   9 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   5 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 27 files changed, 658 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8cdd033a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
new file mode 100644
index 000..2e5393e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.UUID;
+
+/**
+ * Feedback for a BlockSyncTask.
+ */
+public class BlockSyncTaskExecutionFeedback {
+
+  private UUID syncTaskId;
+  private SyncTaskExecutionOutcome outcome;
+  private SyncTaskExecutionResult result;
+  private String syncMountId;
+
+  public BlockSyncTaskExecutionFeedback(UUID syncTaskId,
+  SyncTaskExecutionOutcome outcome, SyncTaskExecutionResult result,
+  String syncMountId) {
+this.syncTaskId = syncTaskId;
+this.outcome = outcome;
+this.result = result;
+this.syncMountId = syncMountId;
+  }
+
+  public static BlockSyncTaskExecutionFeedback finishedSuccessfully(
+  UUID syncTaskId, String syncMountId, SyncTaskExecutionResult result) {
+return new BlockSyncTaskExecutionFeedback(syncTaskId,
+SyncTaskExecutionOutcome.FINISHED_SUCCESSFULLY, result, syncMountId);
+  }
+
+  public static BlockSyncTaskExecutionFeedback failedWithExc

[05/50] [abbrv] hadoop git commit: HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot enabled" status. Contributed by Siyao Meng.

2018-09-04 Thread ehiggs
HDFS-13838. WebHdfsFileSystem.getFileStatus() won't return correct "snapshot 
enabled" status. Contributed by Siyao Meng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26c2a97c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26c2a97c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26c2a97c

Branch: refs/heads/HDFS-12090
Commit: 26c2a97c566969f50eb8e8432009724c51152a98
Parents: 602d138
Author: Wei-Chiu Chuang 
Authored: Mon Aug 27 16:02:35 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Mon Aug 27 16:02:35 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/web/JsonUtilClient.java |  4 
 .../java/org/apache/hadoop/hdfs/web/TestWebHDFS.java| 12 
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26c2a97c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 9bb1846..a685573 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -133,6 +133,7 @@ class JsonUtilClient {
 Boolean aclBit = (Boolean) m.get("aclBit");
 Boolean encBit = (Boolean) m.get("encBit");
 Boolean erasureBit  = (Boolean) m.get("ecBit");
+Boolean snapshotEnabledBit  = (Boolean) m.get("snapshotEnabled");
 EnumSet f =
 EnumSet.noneOf(HdfsFileStatus.Flags.class);
 if (aclBit != null && aclBit) {
@@ -144,6 +145,9 @@ class JsonUtilClient {
 if (erasureBit != null && erasureBit) {
   f.add(HdfsFileStatus.Flags.HAS_EC);
 }
+if (snapshotEnabledBit != null && snapshotEnabledBit) {
+  f.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED);
+}
 
 Map ecPolicyObj = (Map) m.get("ecPolicyObj");
 ErasureCodingPolicy ecPolicy = null;
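
The snapshotEnabled handling added above follows the same optional-boolean
pattern as the other flags: a key absent from the JSON map yields null and is
treated as false. An illustrative-only sketch of that pattern with stand-in
types:

import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;

class OptionalFlagExample {
  enum Flags { SNAPSHOT_ENABLED }

  static EnumSet<Flags> parse(Map<String, Object> m) {
    EnumSet<Flags> f = EnumSet.noneOf(Flags.class);
    Boolean snapshotEnabled = (Boolean) m.get("snapshotEnabled");
    // Both "absent" and "false" leave the flag unset.
    if (snapshotEnabled != null && snapshotEnabled) {
      f.add(Flags.SNAPSHOT_ENABLED);
    }
    return f;
  }

  public static void main(String[] args) {
    Map<String, Object> m = new HashMap<>();
    m.put("snapshotEnabled", Boolean.TRUE);
    System.out.println(parse(m));               // [SNAPSHOT_ENABLED]
    System.out.println(parse(new HashMap<>())); // []
  }
}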

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26c2a97c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index cbc428a..9152636 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -482,6 +482,9 @@ public class TestWebHDFS {
 
   // allow snapshots on /bar using webhdfs
   webHdfs.allowSnapshot(bar);
+  // check if snapshot status is enabled
+  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   webHdfs.createSnapshot(bar, "s1");
   final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
   Assert.assertTrue(webHdfs.exists(s1path));
@@ -491,15 +494,24 @@ public class TestWebHDFS {
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   dfs.deleteSnapshot(bar, "s1");
   dfs.disallowSnapshot(bar);
+  // check if snapshot status is disabled
+  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
 
   // disallow snapshots on /bar using webhdfs
   dfs.allowSnapshot(bar);
+  // check if snapshot status is enabled, again
+  assertTrue(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertEquals(1, snapshottableDirs.length);
   assertEquals(bar, snapshottableDirs[0].getFullPath());
   webHdfs.disallowSnapshot(bar);
+  // check if snapshot status is disabled, again
+  assertFalse(dfs.getFileStatus(bar).isSnapshotEnabled());
+  assertFalse(webHdfs.getFileStatus(bar).isSnapshotEnabled());
   snapshottableDirs = dfs.getSnapshottableDirListing();
   assertNull(snapshottableDirs);
   try {





[23/50] [abbrv] hadoop git commit: HDDS-380. Remove synchronization from ChunkGroupOutputStream and ChunkOutputStream. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-380. Remove synchronization from ChunkGroupOutputStream and 
ChunkOutputStream. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bd42171
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bd42171
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bd42171

Branch: refs/heads/HDFS-12090
Commit: 0bd4217194ae50ec30e386b200fcfa54c069f042
Parents: 3fa4639
Author: Nanda kumar 
Authored: Wed Aug 29 13:31:19 2018 +0530
Committer: Nanda kumar 
Committed: Wed Aug 29 13:31:19 2018 +0530

--
 .../hadoop/hdds/scm/storage/ChunkOutputStream.java  | 16 
 .../ozone/client/io/ChunkGroupOutputStream.java | 12 ++--
 2 files changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd42171/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index f2df3fa..8d311d0 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -99,7 +99,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void write(int b) throws IOException {
+  public void write(int b) throws IOException {
 checkOpen();
 int rollbackPosition = buffer.position();
 int rollbackLimit = buffer.limit();
@@ -110,7 +110,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void write(byte[] b, int off, int len)
+  public void write(byte[] b, int off, int len)
   throws IOException {
 if (b == null) {
   throw new NullPointerException();
@@ -137,7 +137,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void flush() throws IOException {
+  public void flush() throws IOException {
 checkOpen();
 if (buffer.position() > 0) {
   int rollbackPosition = buffer.position();
@@ -147,7 +147,7 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void close() throws IOException {
+  public void close() throws IOException {
 if (xceiverClientManager != null && xceiverClient != null
 && buffer != null) {
   if (buffer.position() > 0) {
@@ -164,7 +164,7 @@ public class ChunkOutputStream extends OutputStream {
 }
   }
 
-  public synchronized void cleanup() {
+  public void cleanup() {
 xceiverClientManager.releaseClient(xceiverClient);
 xceiverClientManager = null;
 xceiverClient = null;
@@ -176,7 +176,7 @@ public class ChunkOutputStream extends OutputStream {
*
* @throws IOException if stream is closed
*/
-  private synchronized void checkOpen() throws IOException {
+  private void checkOpen() throws IOException {
 if (xceiverClient == null) {
   throw new IOException("ChunkOutputStream has been closed.");
 }
@@ -191,7 +191,7 @@ public class ChunkOutputStream extends OutputStream {
* @param rollbackLimit limit to restore in buffer if write fails
* @throws IOException if there is an I/O error while performing the call
*/
-  private synchronized void flushBufferToChunk(int rollbackPosition,
+  private void flushBufferToChunk(int rollbackPosition,
   int rollbackLimit) throws IOException {
 boolean success = false;
 try {
@@ -213,7 +213,7 @@ public class ChunkOutputStream extends OutputStream {
*
* @throws IOException if there is an I/O error while performing the call
*/
-  private synchronized void writeChunkToContainer() throws IOException {
+  private void writeChunkToContainer() throws IOException {
 buffer.flip();
 ByteString data = ByteString.copyFrom(buffer);
 ChunkInfo chunk = ChunkInfo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bd42171/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 988af07..00624d5 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -105,7 +105,7 @@ public c

[31/50] [abbrv] hadoop git commit: HADOOP-15680. ITestNativeAzureFileSystemConcurrencyLive times out. Contributed by Andras Bokor.

2018-09-04 Thread ehiggs
HADOOP-15680. ITestNativeAzureFileSystemConcurrencyLive times out.
Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8d138ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8d138ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8d138ca

Branch: refs/heads/HDFS-12090
Commit: e8d138ca7c1b695688515d816ac693437c87df62
Parents: 2e6c110
Author: Steve Loughran 
Authored: Thu Aug 30 14:36:00 2018 +0100
Committer: Steve Loughran 
Committed: Thu Aug 30 14:36:00 2018 +0100

--
 .../hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8d138ca/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
index 87cac15..1c868ea 100644
--- 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemConcurrencyLive.java
@@ -39,7 +39,7 @@ public class ITestNativeAzureFileSystemConcurrencyLive
 extends AbstractWasbTestBase {
 
   private static final int THREAD_COUNT = 102;
-  private static final int TEST_EXECUTION_TIMEOUT = 5000;
+  private static final int TEST_EXECUTION_TIMEOUT = 3;
 
   @Override
   protected AzureBlobStorageTestAccount createTestAccount() throws Exception {





[09/50] [abbrv] hadoop git commit: HDDS-332. Remove the ability to configure ozone.handler.type Contributed by Nandakumar and Anu Engineer.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
--
diff --git 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index 2200cd8..f56cbe8 100644
--- 
a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ 
b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -1,64 +1,58 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
-import static 
com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
-import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-
 import com.sun.jersey.api.container.ContainerFactory;
 import com.sun.jersey.api.core.ApplicationAdapter;
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import 
org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import 
org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.ObjectStoreApplication;
 import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
-import org.apache.hadoop.hdds.scm.protocolPB
-.ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.protocolPB
-.StorageContainerLocationProtocolClientSideTranslatorPB;
-import 
org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import 

[07/50] [abbrv] hadoop git commit: HDFS-13858. RBF: Add check to have single valid argument to safemode command. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13858. RBF: Add check to have single valid argument to safemode command. 
Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75691ad6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75691ad6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75691ad6

Branch: refs/heads/HDFS-12090
Commit: 75691ad600473d4d315434b0876d6d10d3050a6b
Parents: 3974427
Author: Vinayakumar B 
Authored: Tue Aug 28 09:21:07 2018 +0530
Committer: Vinayakumar B 
Committed: Tue Aug 28 09:21:07 2018 +0530

--
 .../hadoop/hdfs/tools/federation/RouterAdmin.java |  6 ++
 .../server/federation/router/TestRouterAdminCLI.java  | 14 ++
 2 files changed, 20 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75691ad6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 91e1669..f88d0a6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -218,6 +218,10 @@ public class RouterAdmin extends Configured implements Tool {
   "Successfully clear quota for mount point " + argv[i]);
 }
   } else if ("-safemode".equals(cmd)) {
+if (argv.length > 2) {
+  throw new IllegalArgumentException(
+  "Too many arguments, Max=1 argument allowed only");
+}
 manageSafeMode(argv[i]);
   } else if ("-nameservice".equals(cmd)) {
 String subcmd = argv[i];
@@ -712,6 +716,8 @@ public class RouterAdmin extends Configured implements Tool {
 } else if (cmd.equals("get")) {
   boolean result = getSafeMode();
   System.out.println("Safe Mode: " + result);
+} else {
+  throw new IllegalArgumentException("Invalid argument: " + cmd);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75691ad6/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
index 2da5fb9..2682e9a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
@@ -519,6 +519,7 @@ public class TestRouterAdminCLI {
 assertTrue(routerContext.getRouter().getSafemodeService().isInSafeMode());
 
 System.setOut(new PrintStream(out));
+System.setErr(new PrintStream(err));
 assertEquals(0, ToolRunner.run(admin,
 new String[] {"-safemode", "get"}));
 assertTrue(out.toString().contains("true"));
@@ -534,6 +535,19 @@ public class TestRouterAdminCLI {
 assertEquals(0, ToolRunner.run(admin,
 new String[] {"-safemode", "get"}));
 assertTrue(out.toString().contains("false"));
+
+out.reset();
+assertEquals(-1, ToolRunner.run(admin,
+new String[] {"-safemode", "get", "-random", "check" }));
+assertTrue(err.toString(), err.toString()
+.contains("safemode: Too many arguments, Max=1 argument allowed only"));
+err.reset();
+
+assertEquals(-1,
+ToolRunner.run(admin, new String[] {"-safemode", "check" }));
+assertTrue(err.toString(),
+err.toString().contains("safemode: Invalid argument: check"));
+err.reset();
   }
 
   @Test


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] [abbrv] hadoop git commit: HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD (Contributed by Laszlo Kollar via Daniel Templeton)

2018-09-04 Thread ehiggs
HADOOP-15706. Typo in compatibility doc: SHOUD -> SHOULD
(Contributed by Laszlo Kollar via Daniel Templeton)

Change-Id: I6e2459d0700df7f3bad4eac8297a11690191c3ba


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2c2a68e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2c2a68e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2c2a68e

Branch: refs/heads/HDFS-12090
Commit: f2c2a68ec208f640e778fc41f95f0284fcc44729
Parents: 5a0babf
Author: Daniel Templeton 
Authored: Thu Aug 30 09:12:36 2018 -0700
Committer: Daniel Templeton 
Committed: Thu Aug 30 09:12:36 2018 -0700

--
 .../hadoop-common/src/site/markdown/Compatibility.md   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2c2a68e/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 6b17c62..03d162a 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -187,7 +187,7 @@ existing documentation and tests and/or adding new documentation or tests.
 
  Java Binary compatibility for end-user applications i.e. Apache Hadoop ABI
 
-Apache Hadoop revisions SHOUD retain binary compatability such that end-user
+Apache Hadoop revisions SHOULD retain binary compatability such that end-user
 applications continue to work without any modifications. Minor Apache Hadoop
 revisions within the same major revision MUST retain compatibility such that
 existing MapReduce applications (e.g. end-user applications and projects such


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: HADOOP-15705. Typo in the definition of "stable" in the interface classification

2018-09-04 Thread ehiggs
HADOOP-15705. Typo in the definition of "stable" in the interface classification

Change-Id: I3eae2143400a534903db4f186400561fc8d2bd56


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d53a10b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d53a10b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d53a10b0

Branch: refs/heads/HDFS-12090
Commit: d53a10b0a552155de700e396fd7f450a4c5f9c22
Parents: 692736f
Author: Daniel Templeton 
Authored: Wed Aug 29 13:59:32 2018 -0700
Committer: Daniel Templeton 
Committed: Wed Aug 29 13:59:32 2018 -0700

--
 .../hadoop-common/src/site/markdown/InterfaceClassification.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d53a10b0/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
index a21e28b..7348044 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/InterfaceClassification.md
@@ -124,7 +124,7 @@ hence serves as a safe development target. A Stable interface may evolve
 compatibly between minor releases.
 
 Incompatible changes allowed: major (X.0.0)
-Compatible changes allowed: maintenance (x.Y.0)
+Compatible changes allowed: maintenance (x.y.Z)
 
  Evolving
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: HDDS-357. Use DBStore and TableStore for OzoneManager non-background service. Contributed by Nandakumar.

2018-09-04 Thread ehiggs
HDDS-357. Use DBStore and TableStore for OzoneManager non-background service.
Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff036e49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff036e49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff036e49

Branch: refs/heads/HDFS-12090
Commit: ff036e49ff967d5dacf4b2d9d5376e57578ef391
Parents: eed8415
Author: Anu Engineer 
Authored: Sun Sep 2 11:47:32 2018 -0700
Committer: Anu Engineer 
Committed: Sun Sep 2 11:47:32 2018 -0700

--
 .../org/apache/hadoop/ozone/OzoneConsts.java|   6 +-
 .../org/apache/hadoop/utils/RocksDBStore.java   |   2 +-
 .../org/apache/hadoop/utils/db/DBStore.java |  22 +
 .../org/apache/hadoop/utils/db/RDBStore.java|  26 +-
 .../common/src/main/resources/ozone-default.xml |   2 +-
 .../apache/hadoop/hdds/server/ServerUtils.java  |   5 +
 .../ozone/client/io/ChunkGroupOutputStream.java |   4 +-
 .../hadoop/ozone/om/helpers/OpenKeySession.java |   6 +-
 .../ozone/om/protocol/OzoneManagerProtocol.java |  11 +-
 ...neManagerProtocolClientSideTranslatorPB.java |   8 +-
 .../src/main/proto/OzoneManagerProtocol.proto   |   6 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  37 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   4 +
 .../apache/hadoop/ozone/om/TestOmSQLCli.java|   7 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  37 +-
 .../hadoop/ozone/web/client/TestVolume.java |   6 +
 .../hadoop/ozone/om/BucketManagerImpl.java  |  57 ++-
 .../org/apache/hadoop/ozone/om/KeyManager.java  |   6 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  | 276 +-
 .../hadoop/ozone/om/OMMetadataManager.java  | 222 
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  | 509 +++
 .../apache/hadoop/ozone/om/OzoneManager.java| 209 
 .../hadoop/ozone/om/VolumeManagerImpl.java  | 156 +++---
 ...neManagerProtocolServerSideTranslatorPB.java |   7 +-
 .../hadoop/ozone/om/TestBucketManagerImpl.java  | 208 
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |  12 +-
 26 files changed, 978 insertions(+), 873 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 15366fb..8ea4d7f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -92,7 +92,6 @@ public final class OzoneConsts {
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String BLOCK_DB = "block.db";
   public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
@@ -113,8 +112,6 @@ public final class OzoneConsts {
   public static final String DELETING_KEY_PREFIX = "#deleting#";
   public static final String DELETED_KEY_PREFIX = "#deleted#";
   public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-  public static final String OPEN_KEY_PREFIX = "#open#";
-  public static final String OPEN_KEY_ID_DELIMINATOR = "#";
 
   /**
* OM LevelDB prefixes.
@@ -138,8 +135,7 @@ public final class OzoneConsts {
*  | #deleting#/volumeName/bucketName/keyName |  KeyInfo|
*  --
*/
-  public static final String OM_VOLUME_PREFIX = "/#";
-  public static final String OM_BUCKET_PREFIX = "/#";
+
   public static final String OM_KEY_PREFIX = "/";
   public static final String OM_USER_PREFIX = "$";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff036e49/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
index b243e3d..379d9e9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java
@@ -94,7 +94,7 @@ public class RocksDBStore implements MetadataStore {
 }
   }
 
-  private IOException toIOException(String msg, RocksDBException e) {
+  public static IOException to
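
The visible signature change makes the RocksDBException-to-IOException helper
public and static so other classes can reuse it. A hedged sketch of what such
a helper looks like (the body here is illustrative, not quoted from the
commit):

  public static IOException toIOException(String msg, RocksDBException e) {
    // Keep the RocksDB status code in the message and chain the cause.
    String status = (e.getStatus() == null)
        ? "N/A" : e.getStatus().getCodeString();
    return new IOException(msg + "; status : " + status
        + "; message : " + e.getMessage(), e);
  }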

[26/50] [abbrv] hadoop git commit: HDDS-280. Support ozone dist-start-stitching on openbsd/osx. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
HDDS-280. Support ozone dist-start-stitching on openbsd/osx. Contributed by 
Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/692736f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/692736f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/692736f7

Branch: refs/heads/HDFS-12090
Commit: 692736f7cfb72b8932dc2eb4f4faa995dc6521f8
Parents: 7362516
Author: Mukul Kumar Singh 
Authored: Thu Aug 30 02:21:24 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Thu Aug 30 02:21:24 2018 +0530

--
 dev-support/bin/ozone-dist-layout-stitching   |  6 +++---
 dev-support/bin/ozone-dist-tar-stitching  |  9 ++---
 hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh |  2 +-
 .../acceptance-test/dev-support/bin/robot-dnd-all.sh  | 10 ++
 hadoop-ozone/acceptance-test/dev-support/bin/robot.sh |  7 ---
 hadoop-ozone/acceptance-test/pom.xml  |  7 +++
 .../src/test/acceptance/basic/ozone-shell.robot   |  1 -
 .../acceptance-test/src/test/acceptance/commonlib.robot   |  2 +-
 hadoop-ozone/common/pom.xml   |  5 +
 hadoop-ozone/docs/content/GettingStarted.md   |  3 ++-
 hadoop-ozone/pom.xml  |  5 +
 11 files changed, 24 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/dev-support/bin/ozone-dist-layout-stitching
--
diff --git a/dev-support/bin/ozone-dist-layout-stitching 
b/dev-support/bin/ozone-dist-layout-stitching
index 2ba7791..1ba652c 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -117,9 +117,9 @@ ROOT=$(cd "${BASEDIR}"/../..;pwd)
 echo
 echo "Current directory $(pwd)"
 echo
-run rm -rf "ozone"
-run mkdir "ozone"
-run cd "ozone"
+run rm -rf "ozone-${HDDS_VERSION}"
+run mkdir "ozone-${HDDS_VERSION}"
+run cd "ozone-${HDDS_VERSION}"
 run cp -p "${ROOT}/LICENSE.txt" .
 run cp -p "${ROOT}/NOTICE.txt" .
 run cp -p "${ROOT}/README.txt" .

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/dev-support/bin/ozone-dist-tar-stitching
--
diff --git a/dev-support/bin/ozone-dist-tar-stitching 
b/dev-support/bin/ozone-dist-tar-stitching
index d1116e4..93d0525 100755
--- a/dev-support/bin/ozone-dist-tar-stitching
+++ b/dev-support/bin/ozone-dist-tar-stitching
@@ -36,13 +36,8 @@ function run()
   fi
 }
 
-#To make the final dist directory easily mountable from docker we don't use
-#version name in the directory name.
-#To include the version name in the root directory of the tar file
-# we create a symbolic link and dereference it during the tar creation
-ln -s -f ozone ozone-${VERSION}
-run tar -c --dereference -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
+run tar -c -f "ozone-${VERSION}.tar" "ozone-${VERSION}"
 run gzip -f "ozone-${VERSION}.tar"
 echo
 echo "Ozone dist tar available at: ${BASEDIR}/ozone-${VERSION}.tar.gz"
-echo
\ No newline at end of file
+echo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
--
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh 
b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
index ee9c6b8..87b7137 100755
--- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-all.sh
@@ -15,4 +15,4 @@
 # limitations under the License.
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-$DIR/robot.sh $DIR/../../src/test/acceptance
+"$DIR/robot.sh" "$DIR/../../src/test/acceptance"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/692736f7/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
--
diff --git a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh 
b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
index 9f1d367..052ffb3 100755
--- a/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
+++ b/hadoop-ozone/acceptance-test/dev-support/bin/robot-dnd-all.sh
@@ -18,15 +18,9 @@ set -x
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 
-#Dir od the definition of the dind based test exeucution container
-DOCKERDIR="$DIR/../docker"
-
 #Dir to save the results
 TARGETDIR="$DIR/../../target/dnd"
 
-#Dir to mount the distribution from
-OZONEDIST="$DIR/../../../../hadoop-dist/target/ozone"
-
 #Name and imagename of the temporary, dind based test containers
 DOCKER_IMAGE_NAME=ozoneacceptance
 DOCKER_INSTANCE_NAME="${DOC

[37/50] [abbrv] hadoop git commit: HDDS-388. Fix the name of the db profile configuration key. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
HDDS-388. Fix the name of the db profile configuration key.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50d2e3ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50d2e3ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50d2e3ec

Branch: refs/heads/HDFS-12090
Commit: 50d2e3ec41c73f9a0198d4a4e3d6f308d3030b8a
Parents: 630b64e
Author: Anu Engineer 
Authored: Fri Aug 31 14:30:29 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 14:30:29 2018 -0700

--
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50d2e3ec/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 6d2ee09..d3ec4a5 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1100,7 +1100,7 @@
   
 
   
-ozone.db.profile
+hdds.db.profile
 DBProfile.SSD
 OZONE, OM, PERFORMANCE, REQUIRED
 This property allows user to pick a configuration
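
Code then reads the profile under the corrected hdds prefix. A minimal
consumer sketch (the key string comes from the hunk above; the default
handling is illustrative):

  OzoneConfiguration conf = new OzoneConfiguration();
  // Falls back to the SSD-tuned RocksDB options when the key is unset.
  String profile = conf.get("hdds.db.profile", "DBProfile.SSD");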


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] [abbrv] hadoop git commit: YARN-8488. Added SUCCEEDED/FAILED states to YARN service. Contributed by Suma Shivaprasad

2018-09-04 Thread ehiggs
YARN-8488.  Added SUCCEEDED/FAILED states to YARN service.
Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd089caf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd089caf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd089caf

Branch: refs/heads/HDFS-12090
Commit: fd089caf69cf608a91564c9c3d20cbf84e7fd60c
Parents: c61824a
Author: Eric Yang 
Authored: Tue Aug 28 13:55:28 2018 -0400
Committer: Eric Yang 
Committed: Tue Aug 28 13:55:28 2018 -0400

--
 .../hadoop/yarn/service/ServiceScheduler.java   | 100 ++---
 .../service/api/records/ComponentState.java |   2 +-
 .../service/api/records/ContainerState.java |   3 +-
 .../yarn/service/api/records/ServiceState.java  |   2 +-
 .../component/instance/ComponentInstance.java   | 144 ++-
 .../timelineservice/ServiceTimelineEvent.java   |   5 +-
 .../ServiceTimelinePublisher.java   |  33 -
 .../yarn/service/MockRunningServiceContext.java |  18 ++-
 .../hadoop/yarn/service/ServiceTestUtils.java   |   9 +-
 .../yarn/service/component/TestComponent.java   |  55 ++-
 .../component/TestComponentRestartPolicy.java   |   1 -
 .../instance/TestComponentInstance.java |  35 ++---
 .../TestServiceTimelinePublisher.java   |   4 +-
 13 files changed, 322 insertions(+), 89 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd089caf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 384659f..b49ef2a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.service.api.ServiceApiConstants;
+import org.apache.hadoop.yarn.service.api.records.ContainerState;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.api.records.ConfigFile;
@@ -80,6 +81,8 @@ import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.apache.hadoop.yarn.util.BoundedAppender;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -102,7 +105,8 @@ import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.registry.client.api.RegistryConstants.*;
-import static org.apache.hadoop.yarn.api.records.ContainerExitStatus.KILLED_AFTER_APP_COMPLETION;
+import static org.apache.hadoop.yarn.api.records.ContainerExitStatus
+.KILLED_AFTER_APP_COMPLETION;
 import static org.apache.hadoop.yarn.service.api.ServiceApiConstants.*;
 import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
 import static org.apache.hadoop.yarn.service.exceptions.LauncherExitCodes
@@ -137,6 +141,8 @@ public class ServiceScheduler extends CompositeService {
 
   private ServiceTimelinePublisher serviceTimelinePublisher;
 
+  private boolean timelineServiceEnabled;
+
  // Global diagnostics that will be reported to RM on exit.
   // The unit the number of characters. This will be limited to 64 * 1024
   // characters.
@@ -169,6 +175,8 @@ public class ServiceScheduler extends CompositeService {
   private volatile FinalApplicationStatus finalApplicationStatus =
   FinalApplicationStatus.ENDED;
 
+  private Clock systemClock;
+
   // For unit test override since we don't want to terminate UT process.
   private ServiceUtils.ProcessTerminationHandler
   terminationHandler = new ServiceUtils.ProcessTerminationHandler();
@@ -
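
Among the additions is a Clock field the scheduler uses for timestamping
state changes. Minimal usage of the YARN clock utility imported above (the
surrounding context is illustrative):

  Clock clock = SystemClock.getInstance();
  long eventTime = clock.getTime();  // wall-clock millis; mockable in tests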

[08/50] [abbrv] hadoop git commit: HDDS-381. Fix TestKeys#testPutAndGetKeyWithDnRestart. Contributed by Mukul Kumar Singh.

2018-09-04 Thread ehiggs
HDDS-381. Fix TestKeys#testPutAndGetKeyWithDnRestart. Contributed by Mukul 
Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2172399c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2172399c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2172399c

Branch: refs/heads/HDFS-12090
Commit: 2172399c55b481ea0da8cf2e2cb91ea6d8140b27
Parents: 75691ad
Author: Nanda kumar 
Authored: Tue Aug 28 22:19:52 2018 +0530
Committer: Nanda kumar 
Committed: Tue Aug 28 22:19:52 2018 +0530

--
 .../common/transport/server/GrpcXceiverService.java|  8 +++-
 .../java/org/apache/hadoop/ozone/MiniOzoneCluster.java |  3 ++-
 .../org/apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 13 +++--
 .../statemachine/commandhandler/TestBlockDeletion.java |  9 +++--
 .../org/apache/hadoop/ozone/web/client/TestKeys.java   | 11 ---
 5 files changed, 27 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2172399c/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
index df6220c..db4a86a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java
@@ -56,10 +56,8 @@ public class GrpcXceiverService extends
   ContainerCommandResponseProto resp = dispatcher.dispatch(request);
   responseObserver.onNext(resp);
 } catch (Throwable e) {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("{} got exception when processing"
+  LOG.error("{} got exception when processing"
 + " ContainerCommandRequestProto {}: {}", request, e);
-  }
   responseObserver.onError(e);
 }
   }
@@ -67,13 +65,13 @@ public class GrpcXceiverService extends
   @Override
   public void onError(Throwable t) {
 // for now we just log a msg
-LOG.info("{}: ContainerCommand send on error. Exception: {}", t);
+LOG.error("{}: ContainerCommand send on error. Exception: {}", t);
   }
 
   @Override
   public void onCompleted() {
 if (isClosed.compareAndSet(false, true)) {
-  LOG.info("{}: ContainerCommand send completed");
+  LOG.debug("{}: ContainerCommand send completed");
   responseObserver.onCompleted();
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2172399c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index b568672..ae6a91e 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -152,7 +152,8 @@ public interface MiniOzoneCluster {
*
* @param i index of HddsDatanode in the MiniOzoneCluster
*/
-  void restartHddsDatanode(int i);
+  void restartHddsDatanode(int i) throws InterruptedException,
+  TimeoutException;
 
   /**
* Shutdown a particular HddsDatanode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2172399c/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 9b7e399..e06e2f6 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -216,7 +216,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   }
 
   @Override
-  public void restartHddsDatanode(int i) {
+  public void restartHddsDatanode(int i) throws InterruptedException,
+  TimeoutException {
 HddsDatanodeService datanodeService = hddsDatanodes.get
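
Because restartHddsDatanode now declares checked exceptions, callers must
propagate or handle them. An illustrative caller (the cluster handle is
assumed to exist):

  void restartFirstDatanode(MiniOzoneCluster cluster)
      throws InterruptedException, TimeoutException {
    // The restart now waits for the datanode to come back up, so it can
    // time out or be interrupted instead of returning immediately.
    cluster.restartHddsDatanode(0);
  }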

[06/50] [abbrv] hadoop git commit: HDDS-247. Handle CLOSED_CONTAINER_IO exception in ozoneClient. Contributed by Shashikant Banerjee.

2018-09-04 Thread ehiggs
HDDS-247. Handle CLOSED_CONTAINER_IO exception in ozoneClient. Contributed by 
Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3974427f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3974427f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3974427f

Branch: refs/heads/HDFS-12090
Commit: 3974427f67299496e13b04f0d006d367b705fcb5
Parents: 26c2a97
Author: Mukul Kumar Singh 
Authored: Tue Aug 28 07:11:36 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Tue Aug 28 07:12:07 2018 +0530

--
 .../hdds/scm/storage/ChunkOutputStream.java |  28 +-
 .../ozone/client/io/ChunkGroupOutputStream.java | 195 +++--
 .../hadoop/ozone/om/helpers/OmKeyInfo.java  |  23 +-
 .../rpc/TestCloseContainerHandlingByClient.java | 408 +++
 .../ozone/container/ContainerTestHelper.java|  21 +
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |  16 +-
 6 files changed, 630 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3974427f/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 779e636..7309434 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -94,6 +94,10 @@ public class ChunkOutputStream extends OutputStream {
 this.chunkIndex = 0;
   }
 
+  public ByteBuffer getBuffer() {
+return buffer;
+  }
+
   @Override
   public synchronized void write(int b) throws IOException {
 checkOpen();
@@ -106,7 +110,8 @@ public class ChunkOutputStream extends OutputStream {
   }
 
   @Override
-  public void write(byte[] b, int off, int len) throws IOException {
+  public synchronized void write(byte[] b, int off, int len)
+  throws IOException {
 if (b == null) {
   throw new NullPointerException();
 }
@@ -143,24 +148,27 @@ public class ChunkOutputStream extends OutputStream {
 
   @Override
   public synchronized void close() throws IOException {
-if (xceiverClientManager != null && xceiverClient != null &&
-buffer != null) {
+if (xceiverClientManager != null && xceiverClient != null
+&& buffer != null) {
+  if (buffer.position() > 0) {
+writeChunkToContainer();
+  }
   try {
-if (buffer.position() > 0) {
-  writeChunkToContainer();
-}
 putKey(xceiverClient, containerKeyData.build(), traceID);
   } catch (IOException e) {
 throw new IOException(
 "Unexpected Storage Container Exception: " + e.toString(), e);
   } finally {
-xceiverClientManager.releaseClient(xceiverClient);
-xceiverClientManager = null;
-xceiverClient = null;
-buffer = null;
+cleanup();
   }
 }
+  }
 
+  public synchronized void cleanup() {
+xceiverClientManager.releaseClient(xceiverClient);
+xceiverClientManager = null;
+xceiverClient = null;
+buffer = null;
   }
 
   /**
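
Exposing getBuffer() lets the key-level stream recover bytes that never
reached a now-closed container. A simplified sketch of the retry flow this
enables (the helper names here are hedged stand-ins, not the commit's exact
code):

  try {
    chunkStream.write(b, off, len);
  } catch (IOException ioe) {
    if (isClosedContainerException(ioe)) {            // CLOSED_CONTAINER_IO
      ByteBuffer unflushed = chunkStream.getBuffer(); // not yet persisted
      chunkStream.cleanup();                          // release the old client
      allocateNewBlockAndRewrite(unflushed);          // replay to an open container
    } else {
      throw ioe;
    }
  }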

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3974427f/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
--
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 83b4dfd..988af07 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -46,8 +47,10 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.

[02/50] [abbrv] hadoop git commit: HDDS-375. ContainerReportHandler should not send replication events for open containers. Contributed by Ajay Kumar.

2018-09-04 Thread ehiggs
HDDS-375. ContainerReportHandler should not send replication events for open
containers. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9b63956
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9b63956
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9b63956

Branch: refs/heads/HDFS-12090
Commit: c9b63956d97521ec21a051bfcbbf4b79262ea16f
Parents: f152582
Author: Xiaoyu Yao 
Authored: Mon Aug 27 10:39:30 2018 -0700
Committer: Xiaoyu Yao 
Committed: Mon Aug 27 10:40:33 2018 -0700

--
 .../scm/container/ContainerReportHandler.java   |  4 ++
 .../container/TestContainerReportHandler.java   | 40 +++-
 2 files changed, 34 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63956/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 5a9e726..5ca2bcb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -129,6 +129,10 @@ public class ContainerReportHandler implements
   "Container is missing from containerStateManager. Can't request "
   + "replication. {}",
   containerID);
+  return;
+}
+if (container.isContainerOpen()) {
+  return;
 }
 if (replicationStatus.isReplicationEnabled()) {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63956/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index e7b6cd9..443b4b2 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -84,6 +84,7 @@ public class TestContainerReportHandler implements EventPublisher {
 new Builder()
 .setReplicationFactor(ReplicationFactor.THREE)
 .setContainerID((Long) invocation.getArguments()[0])
+.setState(LifeCycleState.CLOSED)
 .build()
 );
 
@@ -116,26 +117,45 @@ public class TestContainerReportHandler implements EventPublisher {
 when(pipelineSelector.getReplicationPipeline(ReplicationType.STAND_ALONE,
 ReplicationFactor.THREE)).thenReturn(pipeline);
 
-long c1 = containerStateManager
+ContainerInfo cont1 = containerStateManager
 .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
-ReplicationFactor.THREE, "root").getContainerInfo()
-.getContainerID();
-
-long c2 = containerStateManager
+ReplicationFactor.THREE, "root").getContainerInfo();
+ContainerInfo cont2 = containerStateManager
 .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
-ReplicationFactor.THREE, "root").getContainerInfo()
-.getContainerID();
-
+ReplicationFactor.THREE, "root").getContainerInfo();
+// Open Container
+ContainerInfo cont3 = containerStateManager
+.allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
+ReplicationFactor.THREE, "root").getContainerInfo();
+
+long c1 = cont1.getContainerID();
+long c2 = cont2.getContainerID();
+long c3 = cont3.getContainerID();
+
+// Close remaining containers
+try {
+  containerStateManager.getContainerStateMap()
+  .updateState(cont1, cont1.getState(), LifeCycleState.CLOSING);
+  containerStateManager.getContainerStateMap()
+  .updateState(cont1, cont1.getState(), LifeCycleState.CLOSED);
+  containerStateManager.getContainerStateMap()
+  .updateState(cont2, cont2.getState(), LifeCycleState.CLOSING);
+  containerStateManager.getContainerStateMap()
+  .updateState(cont2, cont2.getState(), LifeCycleState.CLOSED);
+
+} catch (IOException e) {
+  LOG.info("Failed to change state of open containers.", e);
+}
 //when
 
 //initial reports before replication is enabled. 2 conta

[19/50] [abbrv] hadoop git commit: YARN-8697. LocalityMulticastAMRMProxyPolicy should fallback to random sub-cluster when cannot resolve resource. Contributed by Botong Huang.

2018-09-04 Thread ehiggs
YARN-8697. LocalityMulticastAMRMProxyPolicy should fallback to random 
sub-cluster when cannot resolve resource. Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ed458b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ed458b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ed458b2

Branch: refs/heads/HDFS-12090
Commit: 7ed458b255e492fd5bc2ca36f216ff1b16054db7
Parents: 3e18b95
Author: Giovanni Matteo Fumarola 
Authored: Tue Aug 28 16:01:35 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Tue Aug 28 16:01:35 2018 -0700

--
 .../LocalityMulticastAMRMProxyPolicy.java   | 105 +++
 .../TestLocalityMulticastAMRMProxyPolicy.java   |  53 --
 2 files changed, 125 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ed458b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 1ccd61c..e5f26d8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -21,8 +21,11 @@ package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
@@ -123,6 +126,8 @@ public class LocalityMulticastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
   public static final Logger LOG =
   LoggerFactory.getLogger(LocalityMulticastAMRMProxyPolicy.class);
 
+  private static Random rand = new Random();
+
   private Map weights;
   private SubClusterResolver resolver;
 
@@ -275,26 +280,18 @@ public class LocalityMulticastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
   }
 
   // Handle node/rack requests that the SubClusterResolver cannot map to
-  // any cluster. Defaulting to home subcluster.
+  // any cluster. Pick a random sub-cluster from active and enabled ones.
+  targetId = getSubClusterForUnResolvedRequest(bookkeeper,
+  rr.getAllocationRequestId());
   if (LOG.isDebugEnabled()) {
 LOG.debug("ERROR resolving sub-cluster for resourceName: "
-+ rr.getResourceName() + " we are falling back to homeSubCluster:"
-+ homeSubcluster);
++ rr.getResourceName() + ", picked a random subcluster to forward:"
++ targetId);
   }
-
-  // If home-subcluster is not active, ignore node/rack request
-  if (bookkeeper.isActiveAndEnabled(homeSubcluster)) {
-if (targetIds != null && targetIds.size() > 0) {
-  bookkeeper.addRackRR(homeSubcluster, rr);
-} else {
-  bookkeeper.addLocalizedNodeRR(homeSubcluster, rr);
-}
+  if (targetIds != null && targetIds.size() > 0) {
+bookkeeper.addRackRR(targetId, rr);
   } else {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("The homeSubCluster (" + homeSubcluster + ") we are "
-  + "defaulting to is not active, the ResourceRequest "
-  + "will be ignored.");
-}
+bookkeeper.addLocalizedNodeRR(targetId, rr);
   }
 }
 
@@ -314,6 +311,14 @@ public class LocalityMulticastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
   }
 
   /**
+   * For unit test to override.
+   */
+  protected SubClusterId getSubClusterForUnResolvedRequest(
+  AllocationBookkeeper bookKeeper, long allocationId) {
+return bookKeeper.getSubClusterForUnResolvedRequest(allocationId);
+  }
+
+  /**
* It splits a list of non-localized resource requests among sub-clusters.
*/
   private void splitAnyRequests(List originalResourceRequests,
@@ -512,10 +517,11 @@ public class LocalityMulticastAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
* This 
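
The fallback amounts to a uniform random pick over the active-and-enabled
sub-clusters tracked by the bookkeeper; in miniature (the accessor name is a
hedged stand-in for whatever the bookkeeper exposes):

  List<SubClusterId> active =
      new ArrayList<>(bookkeeper.getActiveAndEnabledSC());
  SubClusterId target = active.get(rand.nextInt(active.size()));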

[36/50] [abbrv] hadoop git commit: HDDS-98. Adding Ozone Manager Audit Log. Contributed by Dinesh Chitlangia.

2018-09-04 Thread ehiggs
HDDS-98. Adding Ozone Manager Audit Log.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/630b64ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/630b64ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/630b64ec

Branch: refs/heads/HDFS-12090
Commit: 630b64ec7e963968a5bdcd1d625fc78746950137
Parents: 8aa6c4f
Author: Anu Engineer 
Authored: Fri Aug 31 14:20:56 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 14:20:56 2018 -0700

--
 .../src/main/compose/ozone/docker-config|  37 
 .../org/apache/hadoop/ozone/OzoneConsts.java|  32 +++
 hadoop-ozone/common/src/main/bin/ozone  |   2 +
 .../src/main/conf/om-audit-log4j2.properties|  86 
 .../org/apache/hadoop/ozone/audit/OMAction.java |  25 ++-
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  25 ++-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  21 +-
 .../hadoop/ozone/om/helpers/OmKeyArgs.java  |  22 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  16 +-
 .../apache/hadoop/ozone/om/OzoneManager.java| 218 ++-
 10 files changed, 466 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/630b64ec/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index a1828a3..21127f8 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -31,3 +31,40 @@ LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT
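
Under this configuration the logger named OMAudit denies READ-marked events
(onMatch=DENY) and lets WRITE-marked ones through. Equivalent direct Log4j2
usage against the same names (org.apache.logging.log4j imports omitted;
message content illustrative):

  Logger auditLog = LogManager.getLogger("OMAudit");
  Marker write = MarkerManager.getMarker("WRITE");
  Marker read = MarkerManager.getMarker("READ");

  auditLog.info(write, "op=CREATE_VOLUME | volume=vol1 | ret=SUCCESS"); // logged
  auditLog.info(read, "op=INFO_VOLUME | volume=vol1 | ret=SUCCESS");    // denied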

http://git-wip-us.apache.org/repos/asf/hadoop/blob/630b64ec/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 15366fb..9645c02 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -184,4 +184,36 @@ public final class OzoneConsts {
   public static 

[50/50] [abbrv] hadoop git commit: HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. Contributed by Ewan Higgs.

2018-09-04 Thread ehiggs
HDFS-13421. [PROVIDED Phase 2] Implement DNA_BACKUP command in Datanode. 
Contributed by Ewan Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06477abc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06477abc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06477abc

Branch: refs/heads/HDFS-12090
Commit: 06477abcd93eb988b4afd0a2dff549e67e0dbd85
Parents: 8cdd033
Author: Virajith Jalaparti 
Authored: Wed Aug 1 12:13:31 2018 -0700
Committer: Ewan Higgs 
Committed: Mon Sep 3 14:42:33 2018 +0200

--
 .../apache/hadoop/hdfs/BlockInputStream.java|  52 
 .../hdfs/server/datanode/BPOfferService.java|   6 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  18 +++
 .../SyncServiceSatisfierDatanodeWorker.java |  97 +++
 .../SyncTaskExecutionFeedbackCollector.java |  54 
 .../executor/BlockSyncOperationExecutor.java| 122 +++
 .../executor/BlockSyncReaderFactory.java|  92 ++
 .../executor/BlockSyncTaskRunner.java   |  69 +++
 .../hadoop/hdfs/TestBlockInputStream.java   |  84 +
 .../TestBlockSyncOperationExecutor.java |  94 ++
 10 files changed, 688 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06477abc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
new file mode 100644
index 0000000..152f83e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockInputStream.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * Facade around BlockReader that indeed implements the InputStream interface.
+ */
+public class BlockInputStream extends InputStream {
+  private final BlockReader blockReader;
+
+  public BlockInputStream(BlockReader blockReader) {
+this.blockReader = blockReader;
+  }
+
+  @Override
+  public int read() throws IOException {
+byte[] b = new byte[1];
+int c = blockReader.read(b, 0, b.length);
+if (c > 0) {
+  return b[0];
+} else {
+  return -1;
+}
+  }
+
+  @Override
+  public int read(byte b[], int off, int len) throws IOException {
+return blockReader.read(b, off, len);
+  }
+
+  @Override
+  public long skip(long n) throws IOException {
+return blockReader.skip(n);
+  }
+}
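
Hypothetical usage, showing the point of the facade: anything that consumes a
plain InputStream can now read a block's contents through a BlockReader
(blockReader and out are assumed to exist):

  InputStream in = new BlockInputStream(blockReader);
  byte[] buf = new byte[4096];
  int n;
  while ((n = in.read(buf, 0, buf.length)) != -1) {
    out.write(buf, 0, n);  // e.g. stream the block to a backup target
  }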

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06477abc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index a25f6a9..b8eef5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -795,6 +795,12 @@ class BPOfferService {
   ((BlockECReconstructionCommand) cmd).getECTasks();
   dn.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
   break;
+case DatanodeProtocol.DNA_BACKUP:
+  LOG.info("DatanodeCommand action: DNA_BACKUP");
+  Collection backupTasks =
+  ((SyncCommand) cmd).getSyncTasks();
+  dn.getSyncServiceSatisfierDatanodeWorker().processSyncTasks(backupTasks);
+  break;
 default:
   LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
 }

http://git-wip-us

[10/50] [abbrv] hadoop git commit: HDDS-332. Remove the ability to configure ozone.handler.type Contributed by Nandakumar and Anu Engineer.

2018-09-04 Thread ehiggs
HDDS-332. Remove the ability to configure ozone.handler.type
Contributed by Nandakumar and Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df21e1b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df21e1b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df21e1b1

Branch: refs/heads/HDFS-12090
Commit: df21e1b1ddcc8439b5fa1bb79388403f87742e65
Parents: 2172399
Author: Anu Engineer 
Authored: Tue Aug 28 09:56:02 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 09:56:02 2018 -0700

--
 .../apache/hadoop/ozone/OzoneConfigKeys.java|7 -
 .../org/apache/hadoop/ozone/OzoneConsts.java|1 -
 .../common/src/main/resources/ozone-default.xml |   21 -
 .../apache/hadoop/ozone/RatisTestHelper.java|8 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |7 +-
 .../rpc/TestCloseContainerHandlingByClient.java |2 -
 .../ozone/client/rpc/TestOzoneRpcClient.java|9 +-
 .../ozone/container/ContainerTestHelper.java|   10 -
 .../TestContainerDeletionChoosingPolicy.java|8 +-
 .../common/impl/TestContainerPersistence.java   |  116 +-
 .../commandhandler/TestBlockDeletion.java   |8 +-
 .../TestCloseContainerByPipeline.java   |   35 +-
 .../container/ozoneimpl/TestOzoneContainer.java |2 -
 .../ozoneimpl/TestOzoneContainerRatis.java  |2 -
 .../container/ozoneimpl/TestRatisManager.java   |2 -
 .../hadoop/ozone/freon/TestDataValidate.java|7 +-
 .../apache/hadoop/ozone/freon/TestFreon.java|3 +-
 .../ozone/om/TestContainerReportWithKeys.java   |   12 +-
 .../om/TestMultipleContainerReadWrite.java  |5 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |7 +-
 .../apache/hadoop/ozone/om/TestOmMetrics.java   |7 +-
 .../apache/hadoop/ozone/om/TestOmSQLCli.java|6 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |5 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java|   20 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |  188 ---
 .../hadoop/ozone/web/TestLocalOzoneVolumes.java |  187 ---
 .../hadoop/ozone/web/TestOzoneVolumes.java  |  183 +++
 .../hadoop/ozone/web/TestOzoneWebAccess.java|   10 +-
 .../hadoop/ozone/web/client/TestBuckets.java|9 +-
 .../hadoop/ozone/web/client/TestKeysRatis.java  |4 +-
 .../ozone/web/client/TestOzoneClient.java   |3 -
 .../hadoop/ozone/web/client/TestVolume.java |   11 +-
 .../ozone/web/client/TestVolumeRatis.java   |3 -
 .../server/datanode/ObjectStoreHandler.java |  182 ++-
 .../web/handlers/StorageHandlerBuilder.java |   18 +-
 .../web/localstorage/LocalStorageHandler.java   |  385 --
 .../web/localstorage/OzoneMetadataManager.java  | 1138 --
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java |6 +-
 38 files changed, 363 insertions(+), 2274 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 92f0c41..6ad9085 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -66,16 +66,9 @@ public final class OzoneConfigKeys {
   "dfs.container.ratis.ipc.random.port";
   public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
   false;
-
-  public static final String OZONE_LOCALSTORAGE_ROOT =
-  "ozone.localstorage.root";
-  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
   public static final String OZONE_ENABLED =
   "ozone.enabled";
   public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_HANDLER_TYPE_KEY =
-  "ozone.handler.type";
-  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
   public static final String OZONE_TRACE_ENABLED_KEY =
   "ozone.trace.enabled";
   public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
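
With the handler knob removed, the distributed (DistributedStorageHandler)
path is the only one, and enabling Ozone reduces to a single flag
(illustrative snippet):

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);  // "ozone.enabled"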

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df21e1b1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 320a3ed..ab6df92 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.j

[40/50] [abbrv] hadoop git commit: HDDS-392. Incomplete description about auditMap#key in AuditLogging Framework. Contributed by Dinesh Chitlangia.

2018-09-04 Thread ehiggs
HDDS-392. Incomplete description about auditMap#key in AuditLogging Framework.
Contributed by  Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19abaacd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19abaacd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19abaacd

Branch: refs/heads/HDFS-12090
Commit: 19abaacdad84b03fc790341b4b5bcf1c4d41f1fb
Parents: 76bae4c
Author: Anu Engineer 
Authored: Fri Aug 31 22:24:30 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 22:24:30 2018 -0700

--
 .../main/java/org/apache/hadoop/ozone/audit/package-info.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19abaacd/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
index 48de3f7..9c00ef7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -50,8 +50,10 @@ package org.apache.hadoop.ozone.audit;
  * The implementing class must override toAuditMap() to return an
  * instance of Map where both Key and Value are String.
  *
- * Key: must not contain any spaces. If the key is multi word then use
- * camel case.
+ * Key: must contain printable US ASCII characters
+ * May not contain a space, =, ], or "
+ * If the key is multi word then use camel case.
+ *
  * Value: if it is a collection/array, then it must be converted to a comma
  * delimited string
  *
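
An implementation following these rules might look like this (field names
illustrative):

  public Map<String, String> toAuditMap() {
    Map<String, String> auditMap = new LinkedHashMap<>();
    // camelCase key; no space, =, ] or " characters.
    auditMap.put("volumeName", volumeName);
    // Collections become a single comma-delimited string value.
    auditMap.put("acls", String.join(",", aclList));
    return auditMap;
  }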


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: YARN-8723. Fix a typo in CS init error message when resource calculator is not correctly set. Contributed by Abhishek Modi.

2018-09-04 Thread ehiggs
YARN-8723. Fix a typo in CS init error message when resource calculator is not 
correctly set. Contributed by Abhishek Modi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fa46394
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fa46394
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fa46394

Branch: refs/heads/HDFS-12090
Commit: 3fa46394214181ed1cc7f06b886282bbdf67a10f
Parents: 64ad029
Author: Weiwei Yang 
Authored: Wed Aug 29 10:46:13 2018 +0800
Committer: Weiwei Yang 
Committed: Wed Aug 29 11:13:44 2018 +0800

--
 .../resourcemanager/scheduler/capacity/CapacityScheduler.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa46394/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index dec1301..81dcf86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -348,7 +348,7 @@ public class CapacityScheduler extends
 throw new YarnRuntimeException("RM uses DefaultResourceCalculator 
which"
 + " used only memory as resource-type but invalid resource-types"
 + " specified " + ResourceUtils.getResourceTypes() + ". Use"
-+ " DomainantResourceCalculator instead to make effective use of"
++ " DominantResourceCalculator instead to make effective use of"
 + " these resource-types");
   }
   this.usePortForNodeName = this.conf.getUsePortForNodeName();
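The corrected message points users at DominantResourceCalculator whenever resource types beyond memory are configured. A minimal sketch of the corresponding setting, assuming the standard "yarn.scheduler.capacity.resource-calculator" property key:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;

public class SchedulerCalculatorExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Let the CapacityScheduler weigh all configured resource types,
    // not just memory, when comparing allocations.
    conf.set("yarn.scheduler.capacity.resource-calculator",
        DominantResourceCalculator.class.getName());
    System.out.println(conf.get("yarn.scheduler.capacity.resource-calculator"));
  }
}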





[32/50] [abbrv] hadoop git commit: HADOOP-15107. Stabilize/tune S3A committers; review correctness & docs. Contributed by Steve Loughran.

2018-09-04 Thread ehiggs
HADOOP-15107. Stabilize/tune S3A committers; review correctness & docs.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a0babf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a0babf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a0babf7

Branch: refs/heads/HDFS-12090
Commit: 5a0babf76550f63dad4c17173c4da2bf335c6532
Parents: e8d138c
Author: Steve Loughran 
Authored: Thu Aug 30 14:49:53 2018 +0100
Committer: Steve Loughran 
Committed: Thu Aug 30 14:49:53 2018 +0100

--
 .../lib/output/PathOutputCommitter.java |  12 +-
 .../java/org/apache/hadoop/fs/s3a/Invoker.java  |  15 +-
 .../fs/s3a/commit/AbstractS3ACommitter.java |  16 +-
 .../fs/s3a/commit/S3ACommitterFactory.java  |  18 +-
 .../s3a/commit/magic/MagicS3GuardCommitter.java |   7 +
 .../staging/DirectoryStagingCommitter.java  |   8 +-
 .../staging/PartitionedStagingCommitter.java|   9 +-
 .../hadoop/fs/s3a/commit/staging/Paths.java |  14 +-
 .../fs/s3a/commit/staging/StagingCommitter.java |  50 -
 .../tools/hadoop-aws/committer_architecture.md  |  94 ++---
 .../markdown/tools/hadoop-aws/committers.md |   2 +-
 .../fs/s3a/commit/AbstractCommitITest.java  |  19 ++
 .../fs/s3a/commit/AbstractITCommitMRJob.java|   5 +-
 .../fs/s3a/commit/AbstractITCommitProtocol.java |  63 --
 .../fs/s3a/commit/ITestS3ACommitterFactory.java | 200 +++
 .../fs/s3a/commit/magic/ITMagicCommitMRJob.java |   6 +-
 .../commit/magic/ITestMagicCommitProtocol.java  |  25 ++-
 .../ITStagingCommitMRJobBadDest.java|  62 ++
 .../integration/ITestStagingCommitProtocol.java |  13 ++
 19 files changed, 542 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a0babf7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
index 3679d9f..5e25f50 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java
@@ -57,8 +57,8 @@ public abstract class PathOutputCommitter extends 
OutputCommitter {
   protected PathOutputCommitter(Path outputPath,
   TaskAttemptContext context) throws IOException {
 this.context = Preconditions.checkNotNull(context, "Null context");
-LOG.debug("Creating committer with output path {} and task context"
-+ " {}", outputPath, context);
+LOG.debug("Instantiating committer {} with output path {} and task context"
++ " {}", this, outputPath, context);
   }
 
   /**
@@ -71,8 +71,8 @@ public abstract class PathOutputCommitter extends 
OutputCommitter {
   protected PathOutputCommitter(Path outputPath,
   JobContext context) throws IOException {
 this.context = Preconditions.checkNotNull(context, "Null context");
-LOG.debug("Creating committer with output path {} and job context"
-+ " {}", outputPath, context);
+LOG.debug("Instantiating committer {} with output path {} and job context"
++ " {}", this, outputPath, context);
   }
 
   /**
@@ -103,6 +103,8 @@ public abstract class PathOutputCommitter extends 
OutputCommitter {
 
   @Override
   public String toString() {
-return "PathOutputCommitter{context=" + context + '}';
+return "PathOutputCommitter{context=" + context
++ "; " + super.toString()
++ '}';
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a0babf7/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
index a007ba1..45912a0 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java
@@ -130,8 +130,9 @@ public class Invoker {
   }
 
   /**
-   * Execute an operation and ignore all raised IOExceptions; log at INFO.
-   * @param log log to log at info.
+   * Execute

[01/50] [abbrv] hadoop git commit: YARN-8705. Refactor the UAM heartbeat thread in preparation for YARN-8696. Contributed by Botong Huang. [Forced Update!]

2018-09-04 Thread ehiggs
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12090 959f49b48 -> 06477abcd (forced update)


YARN-8705. Refactor the UAM heartbeat thread in preparation for YARN-8696. 
Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1525825
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1525825
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1525825

Branch: refs/heads/HDFS-12090
Commit: f1525825623a1307b5aa55c456b6afa3e0c61135
Parents: 7b1fa56
Author: Giovanni Matteo Fumarola 
Authored: Mon Aug 27 10:32:22 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Mon Aug 27 10:32:22 2018 -0700

--
 .../yarn/server/AMHeartbeatRequestHandler.java  | 227 +
 .../server/uam/UnmanagedApplicationManager.java | 170 ++---
 .../amrmproxy/FederationInterceptor.java| 245 +--
 3 files changed, 358 insertions(+), 284 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1525825/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
new file mode 100644
index 000..42227bb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
@@ -0,0 +1,227 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
+import org.apache.hadoop.yarn.util.AsyncCallback;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Extends Thread and provides an implementation that is used for processing 
the
+ * AM heart beat request asynchronously and sending back the response using the
+ * callback method registered with the system.
+ */
+public class AMHeartbeatRequestHandler extends Thread {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AMHeartbeatRequestHandler.class);
+
+  // Indication flag for the thread to keep running
+  private volatile boolean keepRunning;
+
+  private Configuration conf;
+  private ApplicationId applicationId;
+
+  private BlockingQueue<AsyncAllocateRequestInfo> requestQueue;
+  private AMRMClientRelayer rmProxyRelayer;
+  private UserGroupInformation userUgi;
+  private int lastResponseId;
+
+  public AMHeartbeatRequestHandler(Configuration conf,
+  ApplicationId applicationId) {
+super("AMHeartbeatRequestHandler Heartbeat Handler Thread");
+this.setUncaughtExceptionHandler(
+new HeartBeatThreadUncaughtExceptionHandler());
+this.keepRunning = true;
+
+this.conf = conf;
+this.applicationId = applicationId;
+this.requestQueue = new LinkedBlockingQueue<>();
+
+resetLastResponseId();
+  }
+
+  /**
+   * Shutdown the thread.
+   */
+  public void shutdown() {
+this.keepRunning = false;
+this.interrupt();
+  }
+
+  @Override
+  public void run() {
+while (keepRunning) {
+  AsyncAllocateRequestInfo requestInfo;
+  try {
+requestInfo = reques
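The run() loop above drains a blocking queue that callers fill from the other side. A sketch of that producer method as it might appear in this class; the allocateAsync name and the AsyncAllocateRequestInfo constructor shape are assumptions, since the diff is truncated here:

  public void allocateAsync(AllocateRequest request,
      AsyncCallback<AllocateResponse> callback) {
    try {
      // Pair the request with its callback so the heartbeat thread can
      // answer through the callback once the RM responds.
      requestQueue.put(new AsyncAllocateRequestInfo(request, callback));
    } catch (InterruptedException e) {
      // Preserve the interrupt status instead of dropping it silently.
      Thread.currentThread().interrupt();
      LOG.warn("Interrupted while queueing an allocate request", e);
    }
  }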

[29/50] [abbrv] hadoop git commit: HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by Kitti Nanasi.

2018-09-04 Thread ehiggs
HADOOP-15698. KMS log4j is not initialized properly at startup. Contributed by 
Kitti Nanasi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/781437c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/781437c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/781437c2

Branch: refs/heads/HDFS-12090
Commit: 781437c219dc3422797a32dc7ba72cd4f5ee38e2
Parents: 582cb10
Author: Kitti Nanasi 
Authored: Wed Aug 29 22:06:36 2018 -0700
Committer: Xiao Chen 
Committed: Wed Aug 29 22:07:49 2018 -0700

--
 .../crypto/key/kms/server/KMSConfiguration.java | 31 
 .../hadoop/crypto/key/kms/server/KMSWebApp.java | 38 +---
 .../crypto/key/kms/server/KMSWebServer.java |  1 +
 3 files changed, 33 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 18eec19..35ffb42 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -103,6 +104,8 @@ public class KMSConfiguration {
 
   public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
 
+  private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
+
   static {
 Configuration.addDefaultResource(KMS_DEFAULT_XML);
 Configuration.addDefaultResource(KMS_SITE_XML);
@@ -159,4 +162,32 @@ public class KMSConfiguration {
 }
 return newer;
   }
+
+  public static void initLogging() {
+String confDir = System.getProperty(KMS_CONFIG_DIR);
+if (confDir == null) {
+  throw new RuntimeException("System property '" +
+  KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
+}
+if (System.getProperty("log4j.configuration") == null) {
+  System.setProperty("log4j.defaultInitOverride", "true");
+  boolean fromClasspath = true;
+  File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
+  if (log4jConf.exists()) {
+PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
+fromClasspath = false;
+  } else {
+ClassLoader cl = Thread.currentThread().getContextClassLoader();
+URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
+if (log4jUrl != null) {
+  PropertyConfigurator.configure(log4jUrl);
+}
+  }
+  LOG.debug("KMS log starting");
+  if (fromClasspath) {
+LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
+LOG.warn("Logging with INFO level to standard output");
+  }
+}
+  }
 }
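A minimal sketch of how the relocated bootstrap is driven; the property name mirrors KMSConfiguration.KMS_CONFIG_DIR as used above (assumed to be "kms.config.dir"), and the directory value is an example:

public class KmsLoggingBootstrapExample {
  public static void main(String[] args) {
    // initLogging() fails fast unless the config dir system property is set.
    System.setProperty("kms.config.dir", "/etc/hadoop-kms/conf");
    KMSConfiguration.initLogging();  // loads kms-log4j.properties
  }
}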

http://git-wip-us.apache.org/repos/asf/hadoop/blob/781437c2/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
index cb4bf7e..0640e25 100644
--- 
a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
+++ 
b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.crypto.key.kms.server;
 
-import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URL;
 
 import javax.servlet.ServletContextEvent;
 import javax.servlet.ServletContextListener;
@@ -37,14 +35,13 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.VersionInfo;
-import org.apache.log4j.PropertyConfigurator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public class KMSWebApp implements ServletContextListener {
 
-  private static final 

[39/50] [abbrv] hadoop git commit: HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli.
Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76bae4cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76bae4cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76bae4cc

Branch: refs/heads/HDFS-12090
Commit: 76bae4ccb1d929260038b1869be8070c2320b617
Parents: 50d2e3e
Author: Anu Engineer 
Authored: Fri Aug 31 18:11:01 2018 -0700
Committer: Anu Engineer 
Committed: Fri Aug 31 18:11:01 2018 -0700

--
 .../common/dev-support/findbugsExcludeFile.xml  |   4 +
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |  82 +++
 .../hadoop/hdds/cli/HddsVersionProvider.java|  35 ++
 .../apache/hadoop/hdds/cli/package-info.java|  22 +
 hadoop-hdds/pom.xml |   5 +
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java   |  43 --
 .../hdds/scm/cli/OzoneCommandHandler.java   |  87 
 .../apache/hadoop/hdds/scm/cli/ResultCode.java  |  31 --
 .../org/apache/hadoop/hdds/scm/cli/SCMCLI.java  | 246 +++--
 .../cli/container/CloseContainerHandler.java|  85 ---
 .../hdds/scm/cli/container/CloseSubcommand.java |  54 ++
 .../cli/container/ContainerCommandHandler.java  | 128 -
 .../cli/container/CreateContainerHandler.java   |  67 ---
 .../scm/cli/container/CreateSubcommand.java |  65 +++
 .../cli/container/DeleteContainerHandler.java   |  95 
 .../scm/cli/container/DeleteSubcommand.java |  60 +++
 .../scm/cli/container/InfoContainerHandler.java | 114 
 .../hdds/scm/cli/container/InfoSubcommand.java  |  94 
 .../scm/cli/container/ListContainerHandler.java | 117 -
 .../hdds/scm/cli/container/ListSubcommand.java  |  83 +++
 .../hdds/scm/cli/container/package-info.java|   3 +
 .../hadoop/hdds/scm/cli/package-info.java   |  12 +-
 hadoop-ozone/common/src/main/bin/ozone  |   2 +-
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java | 518 ---
 24 files changed, 596 insertions(+), 1456 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
index daf6fec..c7db679 100644
--- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml
@@ -21,4 +21,8 @@
   
 
   
+  
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
new file mode 100644
index 000..2b3e6c0
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.hdds.cli;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import picocli.CommandLine;
+import picocli.CommandLine.ExecutionException;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.ParameterException;
+import picocli.CommandLine.RunLast;
+
+/**
+ * This is a generic parent class for all the ozone related cli tools.
+ */
+public class GenericCli implements Callable<Void> {
+
+  @Option(names = {"--verbose"},
+  description = "More verbose output. Show the stack trace of the errors.")
+  private boolean verbose;
+
+  @Option(names = {"-D", "--set"})
+  private Map<String, String> configurationOverrides = new HashMap<>();
+
+  private final CommandLine cmd;
+
+  public GenericCli() {
+cmd = new CommandLine(this);
+  }
+
+  public void ru
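A hypothetical tool built on this class might look like the sketch below; the command name and description are invented, and it assumes the truncated method above is run(String[] argv):

import picocli.CommandLine.Command;

@Command(name = "example",
    description = "Demonstrates the GenericCli pattern",
    mixinStandardHelpOptions = true,
    versionProvider = HddsVersionProvider.class)
public class ExampleCli extends GenericCli {
  public static void main(String[] args) {
    // GenericCli parses --verbose and -D overrides, then dispatches.
    new ExampleCli().run(args);
  }
}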

[30/50] [abbrv] hadoop git commit: HADOOP-15667. FileSystemMultipartUploader should verify that UploadHandle has non-0 length. Contributed by Ewan Higgs

2018-09-04 Thread ehiggs
HADOOP-15667. FileSystemMultipartUploader should verify that UploadHandle has 
non-0 length.
Contributed by Ewan Higgs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e6c1109
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e6c1109
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e6c1109

Branch: refs/heads/HDFS-12090
Commit: 2e6c1109dcdeedb59a3345047e9201271c9a0b27
Parents: 781437c
Author: Steve Loughran 
Authored: Thu Aug 30 14:33:16 2018 +0100
Committer: Steve Loughran 
Committed: Thu Aug 30 14:33:16 2018 +0100

--
 .../hadoop/fs/FileSystemMultipartUploader.java  |  6 ++-
 .../org/apache/hadoop/fs/MultipartUploader.java | 11 +
 .../AbstractContractMultipartUploaderTest.java  | 43 
 .../hadoop/fs/s3a/S3AMultipartUploader.java | 10 ++---
 4 files changed, 61 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e6c1109/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
index a700a9f..f13b50b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -68,6 +68,7 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
   throws IOException {
 
 byte[] uploadIdByteArray = uploadId.toByteArray();
+checkUploadId(uploadIdByteArray);
 Path collectorPath = new Path(new String(uploadIdByteArray, 0,
 uploadIdByteArray.length, Charsets.UTF_8));
 Path partPath =
@@ -101,6 +102,8 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
    List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
   throws IOException {
 
+checkUploadId(multipartUploadId.toByteArray());
+
 if (handles.isEmpty()) {
   throw new IOException("Empty upload");
 }
@@ -133,8 +136,7 @@ public class FileSystemMultipartUploader extends 
MultipartUploader {
   @Override
   public void abort(Path filePath, UploadHandle uploadId) throws IOException {
 byte[] uploadIdByteArray = uploadId.toByteArray();
-Preconditions.checkArgument(uploadIdByteArray.length != 0,
-"UploadId is empty");
+checkUploadId(uploadIdByteArray);
 Path collectorPath = new Path(new String(uploadIdByteArray, 0,
 uploadIdByteArray.length, Charsets.UTF_8));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e6c1109/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
index 47fd9f2..76f58d3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MultipartUploader.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
 
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -91,4 +92,14 @@ public abstract class MultipartUploader {
   public abstract void abort(Path filePath, UploadHandle multipartUploadId)
   throws IOException;
 
+  /**
+   * Utility method to validate uploadIDs.
+   * @param uploadId upload ID to check, as raw bytes
+   * @throws IllegalArgumentException if the upload ID is empty
+   */
+  protected void checkUploadId(byte[] uploadId)
+  throws IllegalArgumentException {
+Preconditions.checkArgument(uploadId.length > 0,
+"Empty UploadId is not valid");
+  }
 }
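A self-contained sketch of the contract the new guard enforces; the method below simply mirrors the checkUploadId() added above, and the wrapper class is invented for illustration:

import com.google.common.base.Preconditions;

public final class UploadIdCheckExample {
  // Mirrors MultipartUploader#checkUploadId from the patch.
  static void checkUploadId(byte[] uploadId) {
    Preconditions.checkArgument(uploadId.length > 0,
        "Empty UploadId is not valid");
  }

  public static void main(String[] args) {
    checkUploadId(new byte[]{1, 2, 3});  // passes
    checkUploadId(new byte[0]);          // throws IllegalArgumentException
  }
}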

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e6c1109/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
index c0e1600..85a6861 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
+++ 
b/hadoop-common-project/ha

[03/50] [abbrv] hadoop git commit: YARN-8675. Remove default hostname for docker containers when net=host. Contributed by Suma Shivaprasad

2018-09-04 Thread ehiggs
YARN-8675. Remove default hostname for docker containers when net=host. 
Contributed by Suma Shivaprasad


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05b2bbeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05b2bbeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05b2bbeb

Branch: refs/heads/HDFS-12090
Commit: 05b2bbeb357d4fa03e71f2bfd5d8eeb0ea6c3f60
Parents: c9b6395
Author: Billie Rinaldi 
Authored: Mon Aug 27 11:34:33 2018 -0700
Committer: Billie Rinaldi 
Committed: Mon Aug 27 11:34:33 2018 -0700

--
 .../runtime/DockerLinuxContainerRuntime.java| 49 
 1 file changed, 29 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05b2bbeb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 1872830..00771ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -134,8 +134,8 @@ import static 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME} sets the
  * hostname to be used by the Docker container. If not specified, a
- * hostname will be derived from the container ID.  This variable is
- * ignored if the network is 'host' and Registry DNS is not enabled.
+ * hostname will be derived from the container ID and set as default
+ * hostname for networks other than 'host'.
  *   
  *   
  * {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER}
@@ -549,22 +549,34 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 }
   }
 
-  /** Set a DNS friendly hostname. */
-  private void setHostname(DockerRunCommand runCommand, String
-  containerIdStr, String name)
+  /** Set a DNS friendly hostname.
+   *  Only add hostname if network is not host or if hostname is
+   *  specified via YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME
+   *  in host network mode
+   */
+  private void setHostname(DockerRunCommand runCommand,
+  String containerIdStr, String network, String name)
   throws ContainerExecutionException {
-if (name == null || name.isEmpty()) {
-  name = RegistryPathUtils.encodeYarnID(containerIdStr);
 
-  String domain = conf.get(RegistryConstants.KEY_DNS_DOMAIN);
-  if (domain != null) {
-name += ("." + domain);
+if (network.equalsIgnoreCase("host")) {
+  if (name != null && !name.isEmpty()) {
+LOG.info("setting hostname in container to: " + name);
+runCommand.setHostname(name);
   }
-  validateHostname(name);
-}
+} else {
+  //get default hostname
+  if (name == null || name.isEmpty()) {
+name = RegistryPathUtils.encodeYarnID(containerIdStr);
 
-LOG.info("setting hostname in container to: " + name);
-runCommand.setHostname(name);
+String domain = conf.get(RegistryConstants.KEY_DNS_DOMAIN);
+if (domain != null) {
+  name += ("." + domain);
+}
+validateHostname(name);
+  }
+  LOG.info("setting hostname in container to: " + name);
+  runCommand.setHostname(name);
+}
   }
 
   /**
@@ -823,12 +835,9 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
 DockerRunCommand runCommand = new DockerRunCommand(containerIdStr,
 dockerRunAsUser, imageName)
 .setNetworkType(network);
-// Only add hostname if network is not host or if Registry DNS is enabled.
-if (!network.equalsIgnoreCase("host") ||
-conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED,
-RegistryConstants.DEFAULT_DNS_ENABLED)) {
-  setHostname(runCommand, containerIdStr, hostname);
-}
+
+setHostname(runCommand, containerIdStr, network, hostname);
+
 runCommand.setCapabilities(capabiliti
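With this change, a client that wants an explicit hostname under host networking sets it in the container launch environment. A sketch; the hostname variable is taken from the diff above, while the runtime-type and network variable names are assumptions:

import java.util.HashMap;
import java.util.Map;

public class DockerLaunchEnvExample {
  public static Map<String, String> launchEnv() {
    Map<String, String> env = new HashMap<>();
    env.put("YARN_CONTAINER_RUNTIME_TYPE", "docker");
    env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK", "host");
    // Without this entry, host-network containers now get no hostname.
    env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME",
        "ctr-example.domain.example");
    return env;
  }
}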

[18/50] [abbrv] hadoop git commit: HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException during processCheckpoints. Contributed by Zsolt Venczel.

2018-09-04 Thread ehiggs
HDFS-13731. ReencryptionUpdater fails with ConcurrentModificationException 
during processCheckpoints. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e18b957
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e18b957
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e18b957

Branch: refs/heads/HDFS-12090
Commit: 3e18b957ebdf20925224ab9c28e6c2f4b6bbdb24
Parents: c5629d5
Author: Zsolt Venczel 
Authored: Tue Aug 28 15:11:58 2018 -0700
Committer: Xiao Chen 
Committed: Tue Aug 28 15:13:43 2018 -0700

--
 .../server/namenode/ReencryptionHandler.java|  6 +--
 .../server/namenode/ReencryptionUpdater.java| 52 ++--
 2 files changed, 30 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e18b957/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index c8c8d68..a8acccd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -714,10 +714,10 @@ public class ReencryptionHandler implements Runnable {
   zst = new ZoneSubmissionTracker();
   submissions.put(zoneId, zst);
 }
+Future<ReencryptionTask> future = batchService.submit(new EDEKReencryptCallable(zoneId,
+currentBatch, reencryptionHandler));
+zst.addTask(future);
   }
-  Future<ReencryptionTask> future = batchService.submit(new EDEKReencryptCallable(zoneId,
-  currentBatch, reencryptionHandler));
-  zst.addTask(future);
   LOG.info("Submitted batch (start:{}, size:{}) of zone {} to re-encrypt.",
   currentBatch.getFirstFilePath(), currentBatch.size(), zoneId);
   currentBatch = new ReencryptionBatch(reencryptBatchSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e18b957/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
index a5923a7..15cfa92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
@@ -383,32 +383,34 @@ public final class ReencryptionUpdater implements 
Runnable {
 final LinkedList<Future<ReencryptionTask>> tasks = tracker.getTasks();
 final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
 ListIterator<Future<ReencryptionTask>> iter = tasks.listIterator();
-while (iter.hasNext()) {
-  Future<ReencryptionTask> curr = iter.next();
-  if (curr.isCancelled()) {
-break;
-  }
-  if (!curr.isDone() || !curr.get().processed) {
-// still has earlier tasks not completed, skip here.
-break;
-  }
-  ReencryptionTask task = curr.get();
-  LOG.debug("Updating re-encryption checkpoint with completed task."
-  + " last: {} size:{}.", task.lastFile, task.batch.size());
-  assert zoneId == task.zoneId;
-  try {
-final XAttr xattr = FSDirEncryptionZoneOp
-.updateReencryptionProgress(dir, zoneNode, status, task.lastFile,
-task.numFilesUpdated, task.numFailures);
-xAttrs.clear();
-xAttrs.add(xattr);
-  } catch (IOException ie) {
-LOG.warn("Failed to update re-encrypted progress to xattr for zone {}",
-zonePath, ie);
-++task.numFailures;
+synchronized (handler) {
+  while (iter.hasNext()) {
+Future<ReencryptionTask> curr = iter.next();
+if (curr.isCancelled()) {
+  break;
+}
+if (!curr.isDone() || !curr.get().processed) {
+  // still has earlier tasks not completed, skip here.
+  break;
+}
+ReencryptionTask task = curr.get();
+LOG.debug("Updating re-encryption checkpoint with completed task."
++ " last: {} size:{}.", task.lastFile, task.batch.size());
+assert zoneId == task.zoneId;
+try {
+  final XAttr xattr = FSDirEncryptionZoneOp
+  .updateReencryptionProgress(dir, zoneNode, status, task.lastFile
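Reduced to a self-contained sketch, the fix serializes every reader and writer of the shared task list on one monitor (the handler object in the patch; a plain lock object stands in for it here), which is what prevents the ConcurrentModificationException:

import java.util.LinkedList;
import java.util.List;

public class SharedTaskListExample {
  private final Object handlerLock = new Object();  // stand-in for 'handler'
  private final List<String> tasks = new LinkedList<>();

  void addTask(String task) {
    synchronized (handlerLock) {
      tasks.add(task);
    }
  }

  void drainCompleted() {
    synchronized (handlerLock) {
      // Safe to iterate and remove: no concurrent structural changes.
      tasks.removeIf(t -> t.startsWith("done:"));
    }
  }
}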

[25/50] [abbrv] hadoop git commit: YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by Craig Condit

2018-09-04 Thread ehiggs
YARN-8642. Add support for tmpfs mounts with the Docker runtime. Contributed by 
Craig Condit


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73625168
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73625168
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73625168

Branch: refs/heads/HDFS-12090
Commit: 73625168c0f29aa646d7a715c9fb15e43d6c7e05
Parents: a0ebb6b
Author: Shane Kumpf 
Authored: Wed Aug 29 07:08:37 2018 -0600
Committer: Shane Kumpf 
Committed: Wed Aug 29 07:08:37 2018 -0600

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../src/main/resources/yarn-default.xml |   7 +
 .../runtime/DockerLinuxContainerRuntime.java|  38 +
 .../linux/runtime/docker/DockerRunCommand.java  |   5 +
 .../container-executor/impl/utils/docker-util.c |  42 ++
 .../container-executor/impl/utils/docker-util.h |   3 +-
 .../test/utils/test_docker_util.cc  |  64 
 .../runtime/TestDockerContainerRuntime.java | 149 +++
 .../runtime/docker/TestDockerRunCommand.java|   5 +-
 .../src/site/markdown/DockerContainers.md   |   1 +
 10 files changed, 317 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 148edb9..d525e4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2012,6 +2012,11 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_DOCKER_DEFAULT_RW_MOUNTS =
   DOCKER_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts";
 
+  /** The default list of tmpfs mounts to be mounted into all
+   *  Docker containers that use DockerContainerRuntime. */
+  public static final String NM_DOCKER_DEFAULT_TMPFS_MOUNTS =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "default-tmpfs-mounts";
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 72e42d8..4262436 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1828,6 +1828,13 @@
   </property>
 
   <property>
+    <description>The default list of tmpfs mounts to be mounted into all Docker
+      containers that use DockerContainerRuntime.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts</name>
+    <value></value>
+  </property>
+
+  <property>
     <description>The mode in which the Java Container Sandbox should run detailed by
       the JavaSandboxLinuxContainerRuntime.</description>
     <name>yarn.nodemanager.runtime.linux.sandbox-mode</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73625168/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 00771ff..0ae3d0f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainer
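A sketch of the admin-side setting, using the property added to yarn-default.xml above; the comma-separated path format is an assumption here:

import org.apache.hadoop.conf.Configuration;

public class TmpfsDefaultsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Mount a tmpfs at /run inside every Docker container by default.
    conf.set("yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts",
        "/run");
    System.out.println(conf.get(
        "yarn.nodemanager.runtime.linux.docker.default-tmpfs-mounts"));
  }
}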

[14/50] [abbrv] hadoop git commit: HDDS-376. Create custom message structure for use in AuditLogging Contributed by Dinesh Chitlangia.

2018-09-04 Thread ehiggs
HDDS-376. Create custom message structure for use in AuditLogging
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac515d22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac515d22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac515d22

Branch: refs/heads/HDFS-12090
Commit: ac515d22d84478acbed92ef4024d9a3d3f329c8a
Parents: cb9d371
Author: Anu Engineer 
Authored: Tue Aug 28 12:59:08 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 12:59:08 2018 -0700

--
 .../apache/hadoop/ozone/audit/AuditLogger.java  |  66 --
 .../apache/hadoop/ozone/audit/AuditMessage.java |  64 ++
 .../apache/hadoop/ozone/audit/package-info.java |  19 ++-
 .../ozone/audit/TestOzoneAuditLogger.java   | 124 ---
 4 files changed, 177 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac515d22/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
index 46ffaab..ee20c66 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -21,10 +21,8 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.message.StructuredDataMessage;
 import org.apache.logging.log4j.spi.ExtendedLogger;
 
-import java.util.Map;
 
 /**
  * Class to define Audit Logger for Ozone.
@@ -32,16 +30,13 @@ import java.util.Map;
 public class AuditLogger {
 
   private ExtendedLogger logger;
-
-  private static final String SUCCESS = AuditEventStatus.SUCCESS.getStatus();
-  private static final String FAILURE = AuditEventStatus.FAILURE.getStatus();
   private static final String FQCN = AuditLogger.class.getName();
   private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
   private static final Marker READ_MARKER = AuditMarker.READ.getMarker();
 
   /**
* Parametrized Constructor to initialize logger.
-   * @param type
+   * @param type Audit Logger Type
*/
   public AuditLogger(AuditLoggerType type){
 initializeLogger(type);
@@ -60,68 +55,53 @@ public class AuditLogger {
 return logger;
   }
 
-  public void logWriteSuccess(AuditAction type, Map<String, String> data) {
-logWriteSuccess(type, data, Level.INFO);
+  public void logWriteSuccess(AuditMessage msg) {
+logWriteSuccess(Level.INFO, msg);
   }
 
-  public void logWriteSuccess(AuditAction type, Map<String, String> data, Level
-  level) {
-StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
-type.getAction(), data);
+  public void logWriteSuccess(Level level, AuditMessage msg) {
 this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, null);
   }
 
-
-  public void logWriteFailure(AuditAction type, Map<String, String> data) {
-logWriteFailure(type, data, Level.INFO, null);
+  public void logWriteFailure(AuditMessage msg) {
+logWriteFailure(Level.ERROR, msg);
   }
 
-  public void logWriteFailure(AuditAction type, Map<String, String> data, Level
-  level) {
-logWriteFailure(type, data, level, null);
+  public void logWriteFailure(Level level, AuditMessage msg) {
+logWriteFailure(level, msg, null);
   }
 
-  public void logWriteFailure(AuditAction type, Map<String, String> data,
-  Throwable exception) {
-logWriteFailure(type, data, Level.INFO, exception);
+  public void logWriteFailure(AuditMessage msg, Throwable exception) {
+logWriteFailure(Level.ERROR, msg, exception);
   }
 
-  public void logWriteFailure(AuditAction type, Map<String, String> data, Level
-  level, Throwable exception) {
-StructuredDataMessage msg = new StructuredDataMessage("", FAILURE,
-type.getAction(), data);
+  public void logWriteFailure(Level level, AuditMessage msg,
+  Throwable exception) {
 this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, exception);
   }
 
-  public void logReadSuccess(AuditAction type, Map<String, String> data) {
-logReadSuccess(type, data, Level.INFO);
+  public void logReadSuccess(AuditMessage msg) {
+logReadSuccess(Level.INFO, msg);
   }
 
-  public void logReadSuccess(AuditAction type, Map<String, String> data, Level
-  level) {
-StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
-type.getAction(), data);
+  public void logReadSuccess(Level level, AuditMessage msg) {
 this.logger.logIfEnabled(FQCN, level, READ_MARKER, msg, null);
   }
 
-  public void logReadFailure(AuditAc
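A sketch of the new call style. Only logWriteSuccess(AuditMessage) and AuditEventStatus are taken from the diff; the AuditMessage constructor shape and the OMLOGGER type are assumptions, since AuditMessage.java is not shown in this excerpt:

import java.util.Map;
import java.util.TreeMap;

public class AuditExample {
  public static void main(String[] args) {
    AuditLogger auditLog = new AuditLogger(AuditLoggerType.OMLOGGER);
    Map<String, String> params = new TreeMap<>();
    params.put("volume", "vol1");
    // Assumed constructor: (user, ip, operation, params, status).
    AuditMessage msg = new AuditMessage("testUser", "127.0.0.1",
        "CREATE_VOLUME", params, AuditEventStatus.SUCCESS.getStatus());
    auditLog.logWriteSuccess(msg);
  }
}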

[24/50] [abbrv] hadoop git commit: HDFS-13634. RBF: Configurable value in xml for async connection request queue size. Contributed by CR Hota.

2018-09-04 Thread ehiggs
HDFS-13634. RBF: Configurable value in xml for async connection request queue 
size. Contributed by CR Hota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0ebb6b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0ebb6b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0ebb6b3

Branch: refs/heads/HDFS-12090
Commit: a0ebb6b39f2932d3ea2fb5e287f52b841e108428
Parents: 0bd4217
Author: Yiqun Lin 
Authored: Wed Aug 29 16:15:22 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Aug 29 16:15:22 2018 +0800

--
 .../federation/router/ConnectionManager.java  | 18 +++---
 .../server/federation/router/RBFConfigKeys.java   |  5 +
 .../src/main/resources/hdfs-rbf-default.xml   |  8 
 3 files changed, 24 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0ebb6b3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
index 0b50845..9fb83e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
@@ -49,9 +49,6 @@ public class ConnectionManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ConnectionManager.class);
 
-  /** Number of parallel new connections to create. */
-  protected static final int MAX_NEW_CONNECTIONS = 100;
-
   /** Minimum amount of active connections: 50%. */
   protected static final float MIN_ACTIVE_RATIO = 0.5f;
 
@@ -77,8 +74,10 @@ public class ConnectionManager {
   private final Lock writeLock = readWriteLock.writeLock();
 
   /** Queue for creating new connections. */
-  private final BlockingQueue<ConnectionPool> creatorQueue =
-  new ArrayBlockingQueue<>(MAX_NEW_CONNECTIONS);
+  private final BlockingQueue<ConnectionPool> creatorQueue;
+  /** Max size of queue for creating new connections. */
+  private final int creatorQueueMaxSize;
+
   /** Create new connections asynchronously. */
   private final ConnectionCreator creator;
   /** Periodic executor to remove stale connection pools. */
@@ -106,7 +105,12 @@ public class ConnectionManager {
 this.pools = new HashMap<>();
 
 // Create connections in a thread asynchronously
-this.creator = new ConnectionCreator(creatorQueue);
+this.creatorQueueMaxSize = this.conf.getInt(
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE,
+RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT
+);
+this.creatorQueue = new ArrayBlockingQueue<>(this.creatorQueueMaxSize);
+this.creator = new ConnectionCreator(this.creatorQueue);
 this.creator.setDaemon(true);
 
 // Cleanup periods
@@ -213,7 +217,7 @@ public class ConnectionManager {
 if (conn == null || !conn.isUsable()) {
   if (!this.creatorQueue.offer(pool)) {
 LOG.error("Cannot add more than {} connections at the same time",
-MAX_NEW_CONNECTIONS);
+this.creatorQueueMaxSize);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0ebb6b3/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
index 87df5d2..997e1dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RBFConfigKeys.java
@@ -93,6 +93,11 @@ public class RBFConfigKeys extends 
CommonConfigurationKeysPublic {
   TimeUnit.SECONDS.toMillis(5);
 
   // HDFS Router NN client
+  public static final String
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.creator.queue-size";
+  public static final int
+  DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE_DEFAULT = 100;
   public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
   FEDERATION_ROUTER_PREFIX + "connection.pool-size";
   public static final 
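A sketch of overriding the new default of 100 through the key defined above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;

public class RouterCreatorQueueExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow up to 200 queued connection-creation requests.
    conf.setInt(
        RBFConfigKeys.DFS_ROUTER_NAMENODE_CONNECTION_CREATOR_QUEUE_SIZE, 200);
  }
}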

[28/50] [abbrv] hadoop git commit: HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by Fei Hui.

2018-09-04 Thread ehiggs
HDFS-13863. FsDatasetImpl should log DiskOutOfSpaceException. Contributed by 
Fei Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/582cb10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/582cb10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/582cb10e

Branch: refs/heads/HDFS-12090
Commit: 582cb10ec74ed5666946a3769002ceb80ba660cb
Parents: d53a10b
Author: Yiqun Lin 
Authored: Thu Aug 30 11:21:13 2018 +0800
Committer: Yiqun Lin 
Committed: Thu Aug 30 11:21:13 2018 +0800

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/582cb10e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d7f133e..27196c2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1397,6 +1397,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   datanode.getMetrics().incrRamDiskBlocksWrite();
 } catch (DiskOutOfSpaceException de) {
   // Ignore the exception since we just fall back to persistent 
storage.
+  LOG.warn("Insufficient space for placing the block on a transient "
+  + "volume, fall back to persistent storage: "
+  + de.getMessage());
 } finally {
   if (ref == null) {
 cacheManager.release(b.getNumBytes());





[21/50] [abbrv] hadoop git commit: HDFS-13854. RBF: The ProcessingAvgTime and ProxyAvgTime should display by JMX with ms unit. Contributed by yanghuafeng.

2018-09-04 Thread ehiggs
HDFS-13854. RBF: The ProcessingAvgTime and ProxyAvgTime should display by JMX 
with ms unit. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64ad0298
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64ad0298
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64ad0298

Branch: refs/heads/HDFS-12090
Commit: 64ad0298d441559951bc9589a40f8aab17c93a5f
Parents: 2651e2c
Author: Brahma Reddy Battula 
Authored: Wed Aug 29 08:29:50 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Aug 29 08:29:50 2018 +0530

--
 .../federation/metrics/FederationRPCMetrics.java | 13 ++---
 .../metrics/FederationRPCPerformanceMonitor.java | 15 +--
 2 files changed, 7 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ad0298/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 9ab4e5a..cce4b86 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -86,15 +86,6 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
   }
 
   /**
-   * Convert nanoseconds to milliseconds.
-   * @param ns Time in nanoseconds.
-   * @return Time in milliseconds.
-   */
-  private static double toMs(double ns) {
-return ns / 1000000;
-  }
-
-  /**
* Reset the metrics system.
*/
   public static void reset() {
@@ -230,7 +221,7 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
 
   @Override
   public double getProxyAvg() {
-return toMs(proxy.lastStat().mean());
+return proxy.lastStat().mean();
   }
 
   @Override
@@ -250,7 +241,7 @@ public class FederationRPCMetrics implements 
FederationRPCMBean {
 
   @Override
   public double getProcessingAvg() {
-return toMs(processing.lastStat().mean());
+return processing.lastStat().mean();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ad0298/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
index 2c2741e..15725d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
@@ -35,6 +35,8 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
 /**
  * Customizable RPC performance monitor. Receives events from the RPC server
  * and aggregates them via JMX.
@@ -120,12 +122,12 @@ public class FederationRPCPerformanceMonitor implements 
RouterRpcMonitor {
 
   @Override
   public void startOp() {
-START_TIME.set(this.getNow());
+START_TIME.set(monotonicNow());
   }
 
   @Override
   public long proxyOp() {
-PROXY_TIME.set(this.getNow());
+PROXY_TIME.set(monotonicNow());
 long processingTime = getProcessingTime();
 if (processingTime >= 0) {
   metrics.addProcessingTime(processingTime);
@@ -188,13 +190,6 @@ public class FederationRPCPerformanceMonitor implements 
RouterRpcMonitor {
 metrics.incrRouterFailureLocked();
   }
 
-  /**
-   * Get current time.
-   * @return Current time in nanoseconds.
-   */
-  private long getNow() {
-return System.nanoTime();
-  }
 
   /**
* Get time between we receiving the operation and sending it to the 
Namenode.
@@ -214,7 +209,7 @@ public class FederationRPCPerformanceMonitor implements 
RouterRpcMonitor {
*/
   private long getProxyTime() {
 if (PROXY_TIME.get() != null && PROXY_TIME.get() > 0) {
-  return getNow() - PROXY_TIME.get();
+  return monotonicNow() - PROXY_TIME.get();
 }
 return -1;
   }
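monotonicNow() already returns milliseconds from a monotonic clock, which is why the nanosecond conversion above could be deleted. A minimal sketch:

import static org.apache.hadoop.util.Time.monotonicNow;

public class ElapsedMillisExample {
  public static void main(String[] args) throws InterruptedException {
    long start = monotonicNow();
    Thread.sleep(25);
    long elapsedMs = monotonicNow() - start;  // already in ms, no toMs() needed
    System.out.println("elapsed ms = " + elapsedMs);
  }
}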


--

[13/50] [abbrv] hadoop git commit: HDFS-13861. RBF: Illegal Router Admin command leads to printing usage for all commands. Contributed by Ayush Saxena.

2018-09-04 Thread ehiggs
HDFS-13861. RBF: Illegal Router Admin command leads to printing usage for all 
commands. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb9d371a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb9d371a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb9d371a

Branch: refs/heads/HDFS-12090
Commit: cb9d371ae2cda1624fc83316ddc09de37d8d0bd3
Parents: fd089ca
Author: Brahma Reddy Battula 
Authored: Wed Aug 29 00:29:05 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Aug 29 00:29:05 2018 +0530

--
 .../hdfs/tools/federation/RouterAdmin.java  | 92 +---
 .../federation/router/TestRouterAdminCLI.java   | 68 +++
 2 files changed, 130 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb9d371a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index f88d0a6..46be373 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -94,25 +94,58 @@ public class RouterAdmin extends Configured implements Tool {
* Print the usage message.
*/
   public void printUsage() {
-String usage = "Federation Admin Tools:\n"
-+ "\t[-add <source> <nameservice1, nameservice2, ...> <destination> "
-+ "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
-+ "-owner <owner> -group <group> -mode <mode>]\n"
-+ "\t[-update <source> <nameservice1, nameservice2, ...> <destination> "
-+ "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
-+ "-owner <owner> -group <group> -mode <mode>]\n"
-+ "\t[-rm <source>]\n"
-+ "\t[-ls <path>]\n"
-+ "\t[-setQuota <path> -nsQuota <nsQuota> -ssQuota "
-+ "<quota in bytes or quota size string>]\n"
-+ "\t[-clrQuota <path>]\n"
-+ "\t[-safemode enter | leave | get]\n"
-+ "\t[-nameservice enable | disable <nameservice>]\n"
-+ "\t[-getDisabledNameservices]\n";
+String usage = getUsage(null);
+System.out.println(usage);
+  }
 
+  private void printUsage(String cmd) {
+String usage = getUsage(cmd);
 System.out.println(usage);
   }
 
+  private String getUsage(String cmd) {
+if (cmd == null) {
+  String[] commands =
+  {"-add", "-update", "-rm", "-ls", "-setQuota", "-clrQuota",
+  "-safemode", "-nameservice", "-getDisabledNameservices"};
+  StringBuilder usage = new StringBuilder();
+  usage.append("Usage: hdfs routeradmin :\n");
+  for (int i = 0; i < commands.length; i++) {
+usage.append(getUsage(commands[i]));
+if (i + 1 < commands.length) {
+  usage.append("\n");
+}
+  }
+  return usage.toString();
+}
+if (cmd.equals("-add")) {
+  return "\t[-add <source> <nameservice1, nameservice2, ...> <destination> "
+  + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
+  + "-owner <owner> -group <group> -mode <mode>]";
+} else if (cmd.equals("-update")) {
+  return "\t[-update <source> <nameservice1, nameservice2, ...> "
+  + "<destination> "
+  + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL] "
+  + "-owner <owner> -group <group> -mode <mode>]";
+} else if (cmd.equals("-rm")) {
+  return "\t[-rm <source>]";
+} else if (cmd.equals("-ls")) {
+  return "\t[-ls <path>]";
+} else if (cmd.equals("-setQuota")) {
+  return "\t[-setQuota <path> -nsQuota <nsQuota> -ssQuota "
+  + "<quota in bytes or quota size string>]";
+} else if (cmd.equals("-clrQuota")) {
+  return "\t[-clrQuota <path>]";
+} else if (cmd.equals("-safemode")) {
+  return "\t[-safemode enter | leave | get]";
+} else if (cmd.equals("-nameservice")) {
+  return "\t[-nameservice enable | disable <nameservice>]";
+} else if (cmd.equals("-getDisabledNameservices")) {
+  return "\t[-getDisabledNameservices]";
+}
+return getUsage(null);
+  }
+
   @Override
   public int run(String[] argv) throws Exception {
 if (argv.length < 1) {
@@ -129,43 +162,43 @@ public class RouterAdmin extends Configured implements Tool {
 if ("-add".equals(cmd)) {
   if (argv.length < 4) {
 System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage();
+printUsage(cmd);
 return exitCode;
   }
 } else if ("-update".equals(cmd)) {
   if (argv.length < 4) {
 System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage();
+printUsage(cmd);
 return exitCode;
   }
-} else if ("-rm".equalsIgnoreCase(cmd)) {
+} else if ("-rm".equals(cmd)) {
   if (argv.length < 2) {
System.err.println("Not enough parameters specified for cmd " + cmd);
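
The shape of the fix is easy to see in isolation: usage text is looked up per command, and the full listing is only the fallback for an unknown (or absent) command. A condensed sketch of that dispatch; the command set and wording below are illustrative, not the full RouterAdmin table:

import java.util.LinkedHashMap;
import java.util.Map;

public class PerCommandUsageExample {
  // Illustrative subset of subcommands mapped to their usage lines.
  private static final Map<String, String> USAGE = new LinkedHashMap<>();
  static {
    USAGE.put("-rm", "\t[-rm <source>]");
    USAGE.put("-ls", "\t[-ls <path>]");
  }

  static String getUsage(String cmd) {
    String one = USAGE.get(cmd);
    if (one != null) {
      return one;                               // only the offending command
    }
    return String.join("\n", USAGE.values());   // unknown or null: everything
  }

  public static void main(String[] args) {
    System.out.println(getUsage("-rm"));  // prints just the -rm usage
    System.out.println(getUsage(null));   // prints the full listing
  }
}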

[16/50] [abbrv] hadoop git commit: HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils Contributed by Nandakumar.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
--
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 3b4426c..b652b6b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -51,9 +51,9 @@ public class TestSCMContainerPlacementRandom {
 .thenReturn(new ArrayList<>(datanodes));
 
 when(mockNodeManager.getNodeStat(anyObject()))
-.thenReturn(new SCMNodeMetric(100l, 0l, 100l));
+.thenReturn(new SCMNodeMetric(100L, 0L, 100L));
 when(mockNodeManager.getNodeStat(datanodes.get(2)))
-.thenReturn(new SCMNodeMetric(100l, 90l, 10l));
+.thenReturn(new SCMNodeMetric(100L, 90L, 10L));
 
 SCMContainerPlacementRandom scmContainerPlacementRandom =
 new SCMContainerPlacementRandom(mockNodeManager, conf);
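
The 100l-to-100L rewrites above are pure checkstyle hygiene; a two-line illustration of why the uppercase suffix is preferred:

long capacity = 100l; // lowercase 'l' is easily misread as the digit '1'
long clearer = 100L;  // the uppercase suffix is unambiguous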

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
--
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index fa87706..da05913 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -21,7 +21,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
-import java.util.UUID;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -132,7 +131,7 @@ public class TestReplicationManager {
   //WHEN
 
   queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-  new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+  new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
   (short) 3));
 
   Thread.sleep(500L);
@@ -159,10 +158,8 @@ public class TestReplicationManager {
   leaseManager.start();
 
   ReplicationManager replicationManager =
-  new ReplicationManager(containerPlacementPolicy, containerStateManager,
-
-
-  queue, leaseManager) {
+  new ReplicationManager(containerPlacementPolicy,
+  containerStateManager, queue, leaseManager) {
 @Override
 protected List<DatanodeDetails> getCurrentReplicas(
 ReplicationRequest request) throws IOException {
@@ -172,7 +169,7 @@ public class TestReplicationManager {
   replicationManager.start();
 
   queue.fireEvent(SCMEvents.REPLICATE_CONTAINER,
-  new ReplicationRequest(1l, (short) 2, System.currentTimeMillis(),
+  new ReplicationRequest(1L, (short) 2, System.currentTimeMillis(),
   (short) 3));
 
   Thread.sleep(500L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5629d54/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
--
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
index a593718..9dd4fe3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationQueue.java
@@ -92,8 +92,8 @@ public class TestReplicationQueue {
 1, replicationQueue.size());
 Assert.assertEquals(temp, msg5);
 
-// Message 2 should be ordered before message 5 as both have same replication
-// number but message 2 has earlier timestamp.
+// Message 2 should be ordered before message 5 as both have same
+// replication number but message 2 has earlier timestamp.
 temp = replicationQueue.take();
 Assert.assertEquals("Should have 0 objects",
 replicationQueue.size(), 0);
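
The rewrapped comment spells out the queue's ordering contract: requests with the larger replica deficit are served first, and equal deficits are served oldest-first. A sketch of such an ordering with a JDK PriorityQueue; the field names are assumptions for illustration, since the real ReplicationRequest supplies its own Comparable implementation:

import java.util.Comparator;
import java.util.PriorityQueue;

public class ReplicationOrderingExample {
  // Assumed shape of a request: container id, replica shortfall, timestamp.
  static final class Req {
    final long containerId;
    final int shortfall;
    final long timestamp;
    Req(long containerId, int shortfall, long timestamp) {
      this.containerId = containerId;
      this.shortfall = shortfall;
      this.timestamp = timestamp;
    }
  }

  public static void main(String[] args) {
    PriorityQueue<Req> queue = new PriorityQueue<>(
        Comparator.comparingInt((Req r) -> -r.shortfall) // bigger deficit first
            .thenComparingLong(r -> r.timestamp));       // then oldest first
    queue.add(new Req(1L, 1, 2000L));
    queue.add(new Req(2L, 1, 1000L));
    System.out.println(queue.poll().containerId); // 2: same deficit, earlier
  }
}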


[04/50] [abbrv] hadoop git commit: HADOOP-15699. Fix some of testContainerManager failures in Windows. Contributed by Botong Huang.

2018-09-04 Thread ehiggs
HADOOP-15699. Fix some of testContainerManager failures in Windows. Contributed by Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/602d1384
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/602d1384
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/602d1384

Branch: refs/heads/HDFS-12090
Commit: 602d13844a8d4c7b08ce185da01fde098ff8b9a6
Parents: 05b2bbe
Author: Giovanni Matteo Fumarola 
Authored: Mon Aug 27 12:25:46 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Mon Aug 27 12:25:46 2018 -0700

--
 .../containermanager/TestContainerManager.java| 18 ++
 1 file changed, 6 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/602d1384/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index ee5259f..d28340b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -320,9 +320,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
 
   @Test (timeout = 1L)
   public void testAuxPathHandler() throws Exception {
-File testDir = GenericTestUtils.getTestDir(GenericTestUtils.getTestDir(
-TestContainerManager.class.getSimpleName() + "LocDir").
-getAbsolutePath());
+File testDir = GenericTestUtils
+.getTestDir(TestContainerManager.class.getSimpleName() + "LocDir");
 testDir.mkdirs();
 File testFile = new File(testDir, "test");
 testFile.createNewFile();
@@ -1977,15 +1976,11 @@ public class TestContainerManager extends BaseContainerManagerTest {
 Signal signal = ContainerLaunch.translateCommandToSignal(command);
 containerManager.start();
 
-File scriptFile = new File(tmpDir, "scriptFile.sh");
+File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
 PrintWriter fileWriter = new PrintWriter(scriptFile);
 File processStartFile =
 new File(tmpDir, "start_file.txt").getAbsoluteFile();
-fileWriter.write("\numask 0"); // So that start file is readable by the test
-fileWriter.write("\necho Hello World! > " + processStartFile);
-fileWriter.write("\necho $$ >> " + processStartFile);
-fileWriter.write("\nexec sleep 1000s");
-fileWriter.close();
+writeScriptFile(fileWriter, "Hello world!", processStartFile, null, false);
 
 ContainerLaunchContext containerLaunchContext =
 recordFactory.newRecordInstance(ContainerLaunchContext.class);
@@ -2008,9 +2003,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
 new HashMap();
 localResources.put(destinationFile, rsrc_alpha);
 containerLaunchContext.setLocalResources(localResources);
-List<String> commands = new ArrayList<>();
-commands.add("/bin/bash");
-commands.add(scriptFile.getAbsolutePath());
+List<String> commands =
+Arrays.asList(Shell.getRunScriptCommand(scriptFile));
 containerLaunchContext.setCommands(commands);
 StartContainerRequest scRequest =
 StartContainerRequest.newInstance(
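
The portability point of this patch is that the test no longer hard-codes a .sh script and /bin/bash; org.apache.hadoop.util.Shell chooses the script extension and interpreter per platform. A simplified sketch of that pattern, with method bodies that only approximate Shell's real behaviour:

import java.io.File;

public class ScriptPortabilityExample {
  static final boolean WINDOWS =
      System.getProperty("os.name").startsWith("Windows");

  // Analogue of Shell.appendScriptExtension(File, String).
  static File appendScriptExtension(File parent, String basename) {
    return new File(parent, basename + (WINDOWS ? ".cmd" : ".sh"));
  }

  // Analogue of Shell.getRunScriptCommand(File).
  static String[] getRunScriptCommand(File script) {
    String path = script.getAbsolutePath();
    return WINDOWS
        ? new String[] {"cmd", "/c", path}
        : new String[] {"bash", path};
  }

  public static void main(String[] args) {
    File script = appendScriptExtension(new File("."), "scriptFile");
    System.out.println(String.join(" ", getRunScriptCommand(script)));
  }
}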





[38/50] [abbrv] hadoop git commit: HDDS-379. Simplify and improve the cli arg parsing of ozone scmcli. Contributed by Elek, Marton.

2018-09-04 Thread ehiggs
http://git-wip-us.apache.org/repos/asf/hadoop/blob/76bae4cc/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
--
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
deleted file mode 100644
index 722c1a5..000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ /dev/null
@@ -1,518 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.cli.ResultCode;
-import org.apache.hadoop.hdds.scm.cli.SCMCLI;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
-
-import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-/**
- * This class tests the CLI of SCM.
- */
-@Ignore ("Needs to be fixed for new SCM and Storage design")
-public class TestSCMCli {
-  private static SCMCLI cli;
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
-  storageContainerLocationClient;
-
-  private static StorageContainerManager scm;
-  private static ScmClient containerOperationClient;
-
-  private static ByteArrayOutputStream outContent;
-  private static PrintStream outStream;
-  private static ByteArrayOutputStream errContent;
-  private static PrintStream errStream;
-  private static XceiverClientManager xceiverClientManager;
-  private static String containerOwner = "OZONE";
-
-  @Rule
-  public Timeout globalTimeout = new Timeout(3);
-
-  @BeforeClass
-  public static void setup() throws Exception {
-conf = new OzoneConfiguration();
-cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-cluster.waitForClusterToBeReady();
-xceiverClientManager = new XceiverClientManager(conf);
-storageContainerLocationClient =
-cluster.getStorageContainerLocationClient();
-containerOperationClient = new ContainerOperationClient(
-storageContainerLocationClient, new XceiverClientManager(conf));
-outContent = new ByteArrayOutputStream();
-outStream = new PrintStream(outContent);
-errContent = new ByteArrayOutputStream();
-errStream = new PrintStream(errContent);
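
For context on what replaces the hand-rolled parsing this deleted test exercised: HDDS-379 moves scmcli toward declarative, annotation-driven argument parsing. A minimal sketch in the picocli style the Ozone CLIs adopted; the command name, option, and behaviour below are illustrative assumptions, not the actual SCMCLI surface:

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "scmcli", description = "Illustrative container CLI sketch")
public class ScmCliSketch implements Runnable {

  @Option(names = {"-c", "--container"}, description = "Container id to act on")
  private long containerId;

  @Override
  public void run() {
    // A real subcommand would call out to SCM here; this just echoes the arg.
    System.out.println("operating on container " + containerId);
  }

  public static void main(String[] args) {
    System.exit(new CommandLine(new ScmCliSketch()).execute(args));
  }
}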

[17/50] [abbrv] hadoop git commit: HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils Contributed by Nandakumar.

2018-09-04 Thread ehiggs
HDDS-382. Remove RatisTestHelper#RatisTestSuite constructor argument and fix checkstyle in ContainerTestHelper, GenericTestUtils. Contributed by Nandakumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5629d54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5629d54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5629d54

Branch: refs/heads/HDFS-12090
Commit: c5629d546d64091a14560df488a7f797a150337e
Parents: 33f42ef
Author: Anu Engineer 
Authored: Tue Aug 28 14:06:19 2018 -0700
Committer: Anu Engineer 
Committed: Tue Aug 28 14:06:19 2018 -0700

--
 .../apache/hadoop/hdds/scm/XceiverClient.java   |  6 +--
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |  6 +--
 .../hadoop/hdds/scm/XceiverClientManager.java   |  2 +-
 .../hdds/scm/storage/ChunkInputStream.java  |  7 +--
 .../hdds/scm/storage/ChunkOutputStream.java |  4 +-
 .../org/apache/hadoop/hdds/client/BlockID.java  |  5 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  2 -
 .../common/helpers/AllocatedBlock.java  |  4 +-
 .../container/common/helpers/ContainerInfo.java | 12 ++---
 .../common/helpers/ContainerWithPipeline.java   |  7 +--
 .../scm/container/common/helpers/Pipeline.java  | 11 ++---
 .../StorageContainerLocationProtocol.java   |  6 ++-
 ...rLocationProtocolClientSideTranslatorPB.java | 21 
 .../scm/storage/ContainerProtocolCalls.java |  6 +--
 .../org/apache/hadoop/ozone/OzoneConsts.java|  5 --
 .../ozone/container/common/helpers/KeyData.java |  8 ++--
 .../apache/hadoop/utils/HddsVersionInfo.java|  6 ++-
 .../apache/hadoop/utils/TestMetadataStore.java  |  1 -
 .../hadoop/ozone/HddsDatanodeService.java   |  3 +-
 .../common/helpers/ContainerUtils.java  | 22 -
 .../container/common/impl/ContainerSet.java |  2 +-
 .../common/impl/OpenContainerBlockMap.java  | 19 
 .../server/ratis/XceiverServerRatis.java|  6 +--
 .../keyvalue/interfaces/KeyManager.java |  4 +-
 .../ozone/protocol/commands/CommandStatus.java  | 16 +++
 .../ozone/container/common/ScmTestMock.java |  6 ++-
 .../common/interfaces/TestHandler.java  |  7 ---
 .../endpoint/TestHeartbeatEndpointTask.java |  2 -
 .../TestRoundRobinVolumeChoosingPolicy.java |  5 +-
 .../container/ozoneimpl/TestOzoneContainer.java |  3 +-
 .../hadoop/hdds/server/events/EventWatcher.java |  6 ++-
 .../hdds/server/events/TestEventQueue.java  |  3 --
 .../hadoop/hdds/scm/block/BlockManagerImpl.java | 18 +++
 .../hdds/scm/block/DeletedBlockLogImpl.java |  3 +-
 .../hdds/scm/block/SCMBlockDeletingService.java |  4 +-
 .../container/CloseContainerEventHandler.java   |  4 +-
 .../hdds/scm/container/ContainerMapping.java|  4 +-
 .../scm/container/ContainerStateManager.java|  7 +--
 .../replication/ReplicationManager.java |  2 +-
 .../scm/container/states/ContainerStateMap.java |  2 +-
 .../hdds/scm/node/states/Node2ContainerMap.java |  4 +-
 .../scm/node/states/NodeNotFoundException.java  |  2 -
 .../hdds/scm/node/states/ReportResult.java  |  3 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java| 50 +---
 .../hdds/scm/pipelines/PipelineManager.java |  6 +--
 .../hdds/scm/pipelines/PipelineSelector.java|  7 +--
 .../scm/server/SCMClientProtocolServer.java |  3 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  8 ++--
 .../hadoop/hdds/scm/block/TestBlockManager.java |  1 -
 .../hdds/scm/block/TestDeletedBlockLog.java |  7 +--
 .../command/TestCommandStatusReportHandler.java | 22 -
 .../TestCloseContainerEventHandler.java |  1 -
 .../scm/container/TestContainerMapping.java |  7 +--
 .../container/TestContainerReportHandler.java   |  2 +-
 .../TestSCMContainerPlacementCapacity.java  |  8 ++--
 .../TestSCMContainerPlacementRandom.java|  4 +-
 .../replication/TestReplicationManager.java | 11 ++---
 .../replication/TestReplicationQueue.java   |  4 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  5 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  3 +-
 .../hdds/scm/node/TestNodeReportHandler.java|  3 +-
 .../ozone/container/common/TestEndPoint.java|  9 ++--
 .../placement/TestContainerPlacement.java   |  6 ++-
 .../apache/hadoop/ozone/client/ObjectStore.java |  7 ++-
 .../hdds/scm/pipeline/TestPipelineClose.java|  4 --
 .../apache/hadoop/ozone/RatisTestHelper.java|  8 ++--
 .../TestStorageContainerManagerHelper.java  |  2 -
 .../rpc/TestCloseContainerHandlingByClient.java |  3 +-
 .../ozone/container/ContainerTestHelper.java|  2 -
 .../common/impl/TestContainerPersistence.java   |  1 -
 .../ozoneimpl/TestOzoneContainerRatis.java  |  3 +-
 .../container/ozoneimpl/TestRatisManager.java   |  4 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |  2 -
 .../hadoop/ozone/web/TestOzoneWebAcc