[hadoop] branch trunk updated: HADOOP-17111. Replace Guava Optional with Java8+ Optional. Contributed by Ahmed Hussein.

2020-07-06 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 639acb6  HADOOP-17111. Replace Guava Optional with Java8+ Optional. 
Contributed by Ahmed Hussein.
639acb6 is described below

commit 639acb6d8921127cde3174a302f2e3d71b44f052
Author: Akira Ajisaka 
AuthorDate: Mon Jul 6 16:08:36 2020 +0900

HADOOP-17111. Replace Guava Optional with Java8+ Optional. Contributed by 
Ahmed Hussein.
---
 .../src/main/resources/checkstyle/checkstyle.xml   |  7 ++-
 .../nodemanager/DefaultContainerExecutor.java  | 19 +++
 .../server/nodemanager/LinuxContainerExecutor.java | 58 +++---
 .../recovery/TestZKRMStateStorePerf.java   | 23 -
 4 files changed, 66 insertions(+), 41 deletions(-)
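
For context, the swap from Guava's Optional to the JDK type is mechanical at the
call sites touched below. A minimal, hypothetical before/after sketch (class,
field and method names are illustrative, not taken from the patch):

    // Before: Guava's Optional
    //   import com.google.common.base.Optional;
    //   Optional<Integer> exitCode = Optional.absent();
    //   if (exitCode.isPresent()) { ... exitCode.get() ... }

    // After: java.util.Optional, available since Java 8
    import java.util.Optional;

    public class ExitCodeHolder {                      // hypothetical example class
      private Optional<Integer> exitCode = Optional.empty();

      void record(Integer code) {
        exitCode = Optional.ofNullable(code);          // empty() when code is null
      }

      String describe() {
        // map()/orElse() replace the isPresent()/get() pattern used with Guava
        return exitCode.map(c -> "exit code " + c).orElse("no exit code recorded");
      }
    }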

diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
index 8f3d3f1..54a5943 100644
--- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
+++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
@@ -119,7 +119,12 @@
 [XML element content stripped by the list archive: a single-line module entry is replaced by a five-line module block]
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index c5fc481..b8f94b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -21,12 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
 
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
-import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import com.google.common.annotations.VisibleForTesting;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -38,7 +33,7 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-
+import java.util.Optional;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.fs.FileContext;
@@ -46,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.service.ServiceStateException;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.CommandExecutor;
@@ -60,15 +56,16 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The {@code DefaultContainerExecuter} class offers generic container
@@ -333,7 +330,7 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 builder.append("Exception from container-launch.\n")
 .append("Container id: ").append(con

[hadoop] branch branch-3.3 updated: HADOOP-17111. Replace Guava Optional with Java8+ Optional. Contributed by Ahmed Hussein.

2020-07-06 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 20df70a  HADOOP-17111. Replace Guava Optional with Java8+ Optional. 
Contributed by Ahmed Hussein.
20df70a is described below

commit 20df70a895d3a4c40cb1fa3fe75be543ddbc1525
Author: Akira Ajisaka 
AuthorDate: Mon Jul 6 16:08:36 2020 +0900

HADOOP-17111. Replace Guava Optional with Java8+ Optional. Contributed by 
Ahmed Hussein.

(cherry picked from commit 639acb6d8921127cde3174a302f2e3d71b44f052)
---
 .../src/main/resources/checkstyle/checkstyle.xml   |  7 ++-
 .../nodemanager/DefaultContainerExecutor.java  | 19 +++
 .../server/nodemanager/LinuxContainerExecutor.java | 58 +++---
 .../recovery/TestZKRMStateStorePerf.java   | 23 -
 4 files changed, 66 insertions(+), 41 deletions(-)

diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
index 8f3d3f1..54a5943 100644
--- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
+++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
@@ -119,7 +119,12 @@
 [XML element content stripped by the list archive: a single-line module entry is replaced by a five-line module block]
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index c5fc481..b8f94b8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -21,12 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
 
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
-import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
+import com.google.common.annotations.VisibleForTesting;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -38,7 +33,7 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-
+import java.util.Optional;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.fs.FileContext;
@@ -46,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.service.ServiceStateException;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.CommandExecutor;
@@ -60,15 +56,16 @@ import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.runtime.ContainerExecutionException;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerExecContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerLivenessContext;
 import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerReapContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The {@code DefaultContainerExecuter} class offers generic container
@@ -333,7 +330,7 @@ public class DefaultContainerExecutor extends 
ContainerExecutor {
 builder.append

[hadoop] branch trunk updated (639acb6 -> 2f500e4)

2020-07-06 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 639acb6  HADOOP-17111. Replace Guava Optional with Java8+ Optional. 
Contributed by Ahmed Hussein.
 add 2f500e4  HADOOP-17081. MetricsSystem doesn't start the sink adapters 
on restart (#2089)

No new revisions were added by this update.

Summary of changes:
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  6 +-
 .../hadoop/metrics2/impl/TestMetricsSystemImpl.java | 21 +
 2 files changed, 26 insertions(+), 1 deletion(-)





[hadoop] branch branch-3.3 updated: HADOOP-17081. MetricsSystem doesn't start the sink adapters on restart (#2089)

2020-07-06 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 0789ae5  HADOOP-17081. MetricsSystem doesn't start the sink adapters 
on restart (#2089)
0789ae5 is described below

commit 0789ae5b78a8ded1cea6d8db668f9fd68fa64f7a
Author: Madhusoodan Pataki 
AuthorDate: Mon Jul 6 20:55:42 2020 +0530

HADOOP-17081. MetricsSystem doesn't start the sink adapters on restart 
(#2089)


Contributed by Madhusoodan P
---
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  6 +-
 .../hadoop/metrics2/impl/TestMetricsSystemImpl.java | 21 +
 2 files changed, 26 insertions(+), 1 deletion(-)
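
The gist of the fix, visible in the hunk below: MetricsSystemImpl.register() used to
bail out with only a "Sink ... already exists!" warning whenever the name was already
in allSinks, so after a stop()/start() cycle no sink adapter was re-created for it.
It now calls registerSink() again when no live adapter exists for that name. In plain
terms, the scenario the new test covers (TestSink is the test's stand-in MetricsSink):

    // Illustrative restart sequence; MetricsSystemImpl, TestSink and getSinkAdapter()
    // are the names used in the test added below.
    MetricsSystemImpl ms = new MetricsSystemImpl("restartDemo");
    ms.start();
    ms.register("demoSink", "", new TestSink());   // an adapter is created for the sink
    ms.stop();                                     // adapters are torn down

    ms.start();
    // before HADOOP-17081: ms.getSinkAdapter("demoSink") was null here, so the sink
    // silently stopped receiving metrics after a restart
    // after HADOOP-17081:  the adapter is re-created and the sink keeps working
    ms.stop();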

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index 624edc9..cf4b4a9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -273,7 +273,11 @@ public class MetricsSystemImpl extends MetricsSystem 
implements MetricsSource {
   T register(final String name, final String description, final T sink) {
 LOG.debug(name +", "+ description);
 if (allSinks.containsKey(name)) {
-  LOG.warn("Sink "+ name +" already exists!");
+  if(sinks.get(name) == null) {
+registerSink(name, description, sink);
+  } else {
+LOG.warn("Sink "+ name +" already exists!");
+  }
   return sink;
 }
 allSinks.put(name, sink);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
index 47a3b4c..1b40a17 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
@@ -639,4 +639,25 @@ public class TestMetricsSystemImpl {
   private static String getPluginUrlsAsString() {
 return "file:metrics2-test-plugin.jar";
   }
+
+  @Test
+  public void testMetricSystemRestart() {
+MetricsSystemImpl ms = new MetricsSystemImpl("msRestartTestSystem");
+TestSink ts = new TestSink();
+String sinkName = "restartTestSink";
+
+try {
+  ms.start();
+  ms.register(sinkName, "", ts);
+  assertNotNull("no adapter exists for " + sinkName,
+  ms.getSinkAdapter(sinkName));
+  ms.stop();
+
+  ms.start();
+  assertNotNull("no adapter exists for " + sinkName,
+  ms.getSinkAdapter(sinkName));
+} finally {
+  ms.stop();
+}
+  }
 }





[hadoop] branch branch-3.2 updated: HADOOP-17081. MetricsSystem doesn't start the sink adapters on restart (#2089)

2020-07-06 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new dfb74a3  HADOOP-17081. MetricsSystem doesn't start the sink adapters 
on restart (#2089)
dfb74a3 is described below

commit dfb74a3eeb43462b3c8527fbc9164caaed72c4fb
Author: Madhusoodan Pataki 
AuthorDate: Mon Jul 6 20:55:42 2020 +0530

HADOOP-17081. MetricsSystem doesn't start the sink adapters on restart 
(#2089)


Contributed by Madhusoodan P
---
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  6 +-
 .../hadoop/metrics2/impl/TestMetricsSystemImpl.java | 21 +
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index 624edc9..cf4b4a9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -273,7 +273,11 @@ public class MetricsSystemImpl extends MetricsSystem 
implements MetricsSource {
   T register(final String name, final String description, final T sink) {
 LOG.debug(name +", "+ description);
 if (allSinks.containsKey(name)) {
-  LOG.warn("Sink "+ name +" already exists!");
+  if(sinks.get(name) == null) {
+registerSink(name, description, sink);
+  } else {
+LOG.warn("Sink "+ name +" already exists!");
+  }
   return sink;
 }
 allSinks.put(name, sink);
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
index f3a2553..47520b5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
@@ -639,4 +639,25 @@ public class TestMetricsSystemImpl {
   private static String getPluginUrlsAsString() {
 return "file:metrics2-test-plugin.jar";
   }
+
+  @Test
+  public void testMetricSystemRestart() {
+MetricsSystemImpl ms = new MetricsSystemImpl("msRestartTestSystem");
+TestSink ts = new TestSink();
+String sinkName = "restartTestSink";
+
+try {
+  ms.start();
+  ms.register(sinkName, "", ts);
+  assertNotNull("no adapter exists for " + sinkName,
+  ms.getSinkAdapter(sinkName));
+  ms.stop();
+
+  ms.start();
+  assertNotNull("no adapter exists for " + sinkName,
+  ms.getSinkAdapter(sinkName));
+} finally {
+  ms.stop();
+}
+  }
 }





[hadoop] branch trunk updated: HDFS-15451. Do not discard non-initial block report for provided storage. (#2119). Contributed by Shanyu Zhao.

2020-07-06 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 834372f  HDFS-15451. Do not discard non-initial block report for 
provided storage. (#2119). Contributed by Shanyu Zhao.
834372f is described below

commit 834372f4040f1e7a00720da5c40407f9b1423b6d
Author: Shanyu Zhao 
AuthorDate: Mon Jul 6 08:43:34 2020 -0700

HDFS-15451. Do not discard non-initial block report for provided storage. 
(#2119). Contributed by Shanyu Zhao.

Signed-off-by: He Xiaoqiao 
---
 .../hdfs/server/blockmanagement/BlockManager.java  |  1 +
 .../server/blockmanagement/TestBlockManager.java   | 53 ++
 2 files changed, 54 insertions(+)
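
The fix itself is a one-line guard in BlockManager.processReport(): while the NameNode
is in startup safe mode, block reports are no longer discarded for PROVIDED storage even
if the storage already has a non-zero block report count, presumably because a PROVIDED
storage is shared across datanodes and its report count therefore says nothing about this
particular datanode. Condensed, the changed condition reads:

    // Simplified from the hunk below; only non-PROVIDED storages stay subject to the
    // "discard non-initial block report during startup safe mode" rule.
    if (namesystem.isInStartupSafeMode()
        && !StorageType.PROVIDED.equals(storageInfo.getStorageType())   // new exemption
        && storageInfo.getBlockReportCount() > 0) {
      // log "discarded non-initial block report ..." and skip processing, as before
    }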

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7f0f17e..f2cd6b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2759,6 +2759,7 @@ public class BlockManager implements BlockStatsMXBean {
 storageInfo = node.updateStorage(storage);
   }
   if (namesystem.isInStartupSafeMode()
+  && !StorageType.PROVIDED.equals(storageInfo.getStorageType())
   && storageInfo.getBlockReportCount() > 0) {
 blockLog.info("BLOCK* processReport 0x{}: "
 + "discarded non-initial block report from {}"
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 11ed5ba..695377a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -49,9 +49,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@@ -1052,6 +1054,57 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testSafeModeWithProvidedStorageBR() throws Exception {
+DatanodeDescriptor node0 = spy(nodes.get(0));
+DatanodeStorageInfo ds0 = node0.getStorageInfos()[0];
+node0.setAlive(true);
+DatanodeDescriptor node1 = spy(nodes.get(1));
+DatanodeStorageInfo ds1 = node1.getStorageInfos()[0];
+node1.setAlive(true);
+
+String providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT;
+DatanodeStorage providedStorage = new DatanodeStorage(
+providedStorageID, DatanodeStorage.State.NORMAL, StorageType.PROVIDED);
+
+// create block manager with provided storage enabled
+Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
+TestProvidedImpl.TestFileRegionBlockAliasMap.class,
+BlockAliasMap.class);
+BlockManager bmPs = new BlockManager(fsn, false, conf);
+bmPs.setBlockPoolId("BP-12344-10.1.1.2-12344");
+
+// pretend to be in safemode
+doReturn(true).when(fsn).isInStartupSafeMode();
+
+// register new node
+DatanodeRegistration nodeReg0 =
+new DatanodeRegistration(node0, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg0);
+bmPs.getDatanodeManager().addDatanode(node0);
+DatanodeRegistration nodeReg1 =
+new DatanodeRegistration(node1, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg1);
+bmPs.getDatanodeManager().addDatanode(node1);
+
+// process reports of provided storage and disk storage
+bmPs.processReport(node0, providedStorage, BlockListAsLongs.EMPTY, null);
+bmPs.processReport(node0, new DatanodeStorage(ds0.getStorageID()),
+BlockListAsLongs.EMPTY, null);
+bmPs.processRe

[hadoop] branch branch-3.3.0 updated: Updated the index as per 3.3.0 release

2020-07-06 Thread brahma
This is an automated email from the ASF dual-hosted git repository.

brahma pushed a commit to branch branch-3.3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3.0 by this push:
 new aa96f18  Updated the index as per 3.3.0 release
aa96f18 is described below

commit aa96f1871bfd858f9bac59cf2a81ec470da649af
Author: Brahma Reddy Battula 
AuthorDate: Mon Jul 6 23:24:25 2020 +0530

Updated the index as per 3.3.0 release
---
 hadoop-project/src/site/markdown/index.md.vm | 237 +--
 1 file changed, 39 insertions(+), 198 deletions(-)

diff --git a/hadoop-project/src/site/markdown/index.md.vm 
b/hadoop-project/src/site/markdown/index.md.vm
index 438145a..78d8a47 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -16,10 +16,7 @@ Apache Hadoop ${project.version}
 
 
 Apache Hadoop ${project.version} incorporates a number of significant
-enhancements over the previous major release line (hadoop-2.x).
-
-This release is generally available (GA), meaning that it represents a point of
-API stability and quality that we consider production-ready.
+enhancements over the previous major release line (hadoop-3.2).
 
 Overview
 
@@ -27,224 +24,68 @@ Overview
 Users are encouraged to read the full set of release notes.
 This page provides an overview of the major changes.
 
-Minimum required Java version increased from Java 7 to Java 8
---
+ARM Support
+
+This is the first release to support ARM architectures.
 
-All Hadoop JARs are now compiled targeting a runtime version of Java 8.
-Users still using Java 7 or below must upgrade to Java 8.
+Upgrade protobuf from 2.5.0 to something newer
+-
+Protobuf upgraded to 3.7.1 as protobuf-2.5.0 reached EOL.
 
-Support for erasure coding in HDFS
+Java 11 runtime support
 --
 
-Erasure coding is a method for durably storing data with significant space
-savings compared to replication. Standard encodings like Reed-Solomon (10,4)
-have a 1.4x space overhead, compared to the 3x overhead of standard HDFS
-replication.
-
-Since erasure coding imposes additional overhead during reconstruction
-and performs mostly remote reads, it has traditionally been used for
-storing colder, less frequently accessed data. Users should consider
-the network and CPU overheads of erasure coding when deploying this
-feature.
-
-More details are available in the
-[HDFS Erasure Coding](./hadoop-project-dist/hadoop-hdfs/HDFSErasureCoding.html)
-documentation.
-
-YARN Timeline Service v.2

-
-We are introducing an early preview (alpha 2) of a major revision of YARN
-Timeline Service: v.2. YARN Timeline Service v.2 addresses two major
-challenges: improving scalability and reliability of Timeline Service, and
-enhancing usability by introducing flows and aggregation.
-
-YARN Timeline Service v.2 alpha 2 is provided so that users and developers
-can test it and provide feedback and suggestions for making it a ready
-replacement for Timeline Service v.1.x. It should be used only in a test
-capacity.
-
-More details are available in the
-[YARN Timeline Service 
v.2](./hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html)
-documentation.
-
-Shell script rewrite

+Java 11 runtime support is completed.
 
-The Hadoop shell scripts have been rewritten to fix many long-standing
-bugs and include some new features.  While an eye has been kept towards
-compatibility, some changes may break existing installations.
+Support impersonation for AuthenticationFilter
+-
 
-Incompatible changes are documented in the release notes, with related
-discussion on [HADOOP-9902](https://issues.apache.org/jira/browse/HADOOP-9902).
+External services or YARN services may need to call into WebHDFS or the YARN REST 
API on behalf of the user using web
+protocols. It would be good to support an impersonation mechanism in 
AuthenticationFilter or similar extensions.
 
-More details are available in the
-[Unix Shell Guide](./hadoop-project-dist/hadoop-common/UnixShellGuide.html)
-documentation. Power users will also be pleased by the
-[Unix Shell API](./hadoop-project-dist/hadoop-common/UnixShellAPI.html)
-documentation, which describes much of the new functionality, particularly
-related to extensibility.
 
-Shaded client jars
+s3A Enhancements
 --
+Lots of enhancements to the S3A code, including Delegation Token support, 
better handling of 404 caching,
+ S3Guard performance, and resilience improvements.
 
-The `hadoop-client` Maven artifact available in 2.x releases pulls
-Hadoop's transitive dependencies onto a Hadoop application's classpath.
-This can be problematic if the versions of these transitive dependencies
-conflict with the versions used by the application.
-
-

[hadoop] annotated tag release-3.3.0-RC0 created (now 970fe5b)

2020-07-06 Thread brahma
This is an automated email from the ASF dual-hosted git repository.

brahma pushed a change to annotated tag release-3.3.0-RC0
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 970fe5b  (tag)
 tagging aa96f1871bfd858f9bac59cf2a81ec470da649af (commit)
 replaces remove-ozone
  by Brahma Reddy Battula
  on Tue Jul 7 03:01:07 2020 +0530

- Log -
Release candidate - hadoop-3.3.0-RC0
-BEGIN PGP SIGNATURE-
Version: GnuPG v1

iQIcBAABAgAGBQJfA5gbAAoJEDhtgO+B50aaoS0QAI9J0dls4aOXc6N0rYinnG7e
UXZuAAQTXRVtwLSnS6BG3jkU2ocgVSo5GJCw3+MVpcIcR3/BpoY7taLZWxshXI/I
S0KpANwcz8eiHR8jFsI1C+3HMKDFdE05bZqOTMLBHes5DyWHAdTSJfnpdKMtumH5
KxVrcYCyHvAEAG7kr/3dTg/BedPU81mdmFE9tihQGBC2eAUEIojUVaOzXFCJK1oV
oacaaWYha3ryzL9E+VJCOMqZEYbzYsFV4XAhxvGGZMvuKx6MnKDWdf1xYsFi+USI
0uJhQsrAk4NjdM5Ve4nVo0r2LnCuEQ5HkCFADCc9LlRPYkkwgn3xjtGxjR8S6iiz
HF+ZrlxYNSnueNcPv7/bgIVbjLLb08rvOjZ2nwtEiA+G6Zr0o5HSA0/Yg5zwjEmw
tLpnyoRSSQXqn+RGyh2BCuYZ+3TOakZF3muscyMDy+mCibpb6pW8EZsZeg+0Bzu4
IIZbcErGJDgJGrInuUFjj0b0WbJ4vDVVA6a6ZCeCC0ySladkeXeztwCWNRNMfWxj
Li8xPf5O39h3jICEvKTE6ePTVMpKwBakiasif1qi/HD810Avhq8PU2IemrmiCXhz
s2sQ2tQNrSL4w7WjV83dJkR5jmBga/Lsk9B2FSXgO5ZLfo+5q+vn0mnVhYPgP8/O
aSkbMa/eBRo9UsZ1d/iJ
=KHBg
-END PGP SIGNATURE-
---

No new revisions were added by this update.





[hadoop] branch trunk updated: HDFS-15417. RBF: Get the datanode report from cache for federation WebHDFS operations (#2080)

2020-07-06 Thread sunchao
This is an automated email from the ASF dual-hosted git repository.

sunchao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e820baa6 HDFS-15417. RBF: Get the datanode report from cache for 
federation WebHDFS operations (#2080)
e820baa6 is described below

commit e820baa6e6f7e850ba62cbf150d760bd0ea6d0e0
Author: Ye Ni <141253+nick...@users.noreply.github.com>
AuthorDate: Mon Jul 6 16:17:09 2020 -0700

HDFS-15417. RBF: Get the datanode report from cache for federation WebHDFS 
operations (#2080)
---
 .../server/federation/router/RouterRpcServer.java  | 123 -
 .../federation/router/RouterWebHdfsMethods.java|   9 +-
 .../server/federation/router/TestRouterRpc.java|  70 +++-
 3 files changed, 191 insertions(+), 11 deletions(-)
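
The router now answers getDatanodeReport() for federation WebHDFS calls from a Guava
LoadingCache that is refreshed in the background, instead of fanning out to every
namenode on each request. A self-contained sketch of that caching pattern, with a
hypothetical String/Long key and value standing in for DatanodeReportType and
DatanodeInfo[]:

    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class ReportCacheSketch {
      public static void main(String[] args) throws Exception {
        // A miss or an explicit refresh() triggers load(); here a timestamp stands in
        // for the expensive "ask every namenode for its datanode report" call.
        LoadingCache<String, Long> reports = CacheBuilder.newBuilder()
            .build(new CacheLoader<String, Long>() {
              @Override
              public Long load(String reportType) {
                return System.currentTimeMillis();
              }
            });

        // Refresh every key already in the cache on a fixed delay, mirroring the
        // scheduleWithFixedDelay() loop added to RouterRpcServer.
        Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(
            () -> reports.asMap().keySet().forEach(reports::refresh),
            0, 10, TimeUnit.SECONDS);

        System.out.println(reports.get("ALL"));   // first access populates the entry
      }
    }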

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 4f1310b..5905a1d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -26,6 +26,8 @@ import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_COUNT_KEY;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DN_REPORT_CACHE_EXPIRE;
+import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DN_REPORT_CACHE_EXPIRE_MS_DEFAULT;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -41,7 +43,19 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
@@ -219,6 +233,9 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol,
   private static final ThreadLocal CUR_USER =
   new ThreadLocal<>();
 
+  /** DN type -> full DN report. */
+  private final LoadingCache dnCache;
+
   /**
* Construct a router RPC server.
*
@@ -361,6 +378,23 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol,
 this.nnProto = new RouterNamenodeProtocol(this);
 this.clientProto = new RouterClientProtocol(conf, this);
 this.routerProto = new RouterUserProtocol(this);
+
+long dnCacheExpire = conf.getTimeDuration(
+DN_REPORT_CACHE_EXPIRE,
+DN_REPORT_CACHE_EXPIRE_MS_DEFAULT, TimeUnit.MILLISECONDS);
+this.dnCache = CacheBuilder.newBuilder()
+.build(new DatanodeReportCacheLoader());
+
+// Actively refresh the dn cache in a configured interval
+Executors
+.newSingleThreadScheduledExecutor()
+.scheduleWithFixedDelay(() -> this.dnCache
+.asMap()
+.keySet()
+.parallelStream()
+.forEach((key) -> this.dnCache.refresh(key)),
+0,
+dnCacheExpire, TimeUnit.MILLISECONDS);
   }
 
   @Override
@@ -869,6 +903,50 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol,
   }
 
   /**
+   * Get the datanode report from cache.
+   *
+   * @param type Type of the datanode.
+   * @return List of datanodes.
+   * @throws IOException If it cannot get the report.
+   */
+  DatanodeInfo[] getCachedDatanodeReport(DatanodeReportType type)
+  throws IOException {
+try {
+  DatanodeInfo[] dns = this.dnCache.get(type);
+  if (dns == null) {
+LOG.debug("Get null DN report from cache");
+dns = getCachedDatanodeReportImpl(type);
+this.dnCache.put(type, dns);
+  }
+  return dns;
+} catch (ExecutionException e) {
+  LOG.error("Cannot

[hadoop] branch trunk updated: HDFS-15449. Optionally ignore port number in mount-table name when picking from initialized uri. Contributed by Uma Maheswara Rao G.

2020-07-06 Thread umamahesh
This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new dc0626b  HDFS-15449. Optionally ignore port number in mount-table name 
when picking from initialized uri. Contributed by Uma Maheswara Rao G.
dc0626b is described below

commit dc0626b5f2f2ba0bd3919650ea231cedd424f77a
Author: Uma Maheswara Rao G 
AuthorDate: Mon Jul 6 18:50:03 2020 -0700

HDFS-15449. Optionally ignore port number in mount-table name when picking 
from initialized uri. Contributed by Uma Maheswara Rao G.
---
 .../org/apache/hadoop/fs/viewfs/Constants.java | 13 +++
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java| 10 -
 .../fs/viewfs/ViewFileSystemOverloadScheme.java| 13 ++-
 .../src/site/markdown/ViewFsOverloadScheme.md  |  8 +++-
 ...SystemOverloadSchemeHdfsFileSystemContract.java |  4 ++
 ...ViewFileSystemOverloadSchemeWithHdfsScheme.java | 45 +-
 ...stViewFileSystemOverloadSchemeWithDFSAdmin.java | 17 
 ...ViewFileSystemOverloadSchemeWithFSCommands.java |  2 +-
 8 files changed, 97 insertions(+), 15 deletions(-)
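
The new behaviour is opt-in through a key added to Constants, with a default of false so
existing deployments keep resolving mount tables by the full authority (host plus port).
A small sketch of enabling it from client code; "mycluster" and "nn1" are hypothetical
names, not from the patch:

    import org.apache.hadoop.conf.Configuration;

    public class ViewFsIgnorePortSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // With this flag on, hdfs://mycluster:8020/ and hdfs://mycluster/ resolve the
        // same mount table named "mycluster" (the port is dropped from the table name).
        conf.setBoolean("fs.viewfs.ignore.port.in.mount.table.name", true);
        // Mount links then need to be declared only once, keyed by host:
        conf.set("fs.viewfs.mounttable.mycluster.link./data", "hdfs://nn1:8020/data");
        System.out.println(conf.get("fs.viewfs.ignore.port.in.mount.table.name"));
      }
    }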

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
index 28ebf73..492cb87 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
@@ -104,4 +104,17 @@ public interface Constants {
   "fs.viewfs.mount.links.as.symlinks";
 
   boolean CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT = true;
+
+  /**
+   * When initializing the viewfs, authority will be used as the mount table
+   * name to find the mount link configurations. To make the mount table name
+   * unique, we may want to ignore port if initialized uri authority contains
+   * port number. By default, we will consider port number also in
+   * ViewFileSystem(This default value false, because to support existing
+   * deployments continue with the current behavior).
+   */
+  String CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME =
+  "fs.viewfs.ignore.port.in.mount.table.name";
+
+  boolean CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT = false;
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index cb36965..0beeda2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.fs.viewfs;
 import static 
org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;
 import static 
org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE;
 import static 
org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE_DEFAULT;
+import static 
org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
+import static 
org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
 import static 
org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS;
 import static 
org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_MOUNT_LINKS_AS_SYMLINKS_DEFAULT;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
@@ -274,9 +276,15 @@ public class ViewFileSystem extends FileSystem {
 final InnerCache innerCache = new InnerCache(fsGetter);
 // Now build  client side view (i.e. client side mount table) from config.
 final String authority = theUri.getAuthority();
+String tableName = authority;
+if (theUri.getPort() != -1 && config
+.getBoolean(CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME,
+CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT)) {
+  tableName = theUri.getHost();
+}
 try {
   myUri = new URI(getScheme(), authority, "/", null, null);
-  fsState = new InodeTree(conf, authority) {
+  fsState = new InodeTree(conf, tableName) {
 @Override
 protected FileSystem getTargetFileSystem(final URI uri)
   throws URISyntaxException, IOException {
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
index 672022b..2f3359d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemOverloadScheme.java
+++ 
b/hadoop-commo

[hadoop] branch trunk updated (dc0626b -> f77bbc2)

2020-07-06 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from dc0626b  HDFS-15449. Optionally ignore port number in mount-table name 
when picking from initialized uri. Contributed by Uma Maheswara Rao G.
 add f77bbc2  HDFS-15312. Apply umask when creating directory by WebHDFS 
(#2096)

No new revisions were added by this update.

Summary of changes:
 .../src/main/webapps/router/explorer.js| 29 ++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js  | 29 ++
 2 files changed, 58 insertions(+)





[hadoop] branch branch-3.1 updated: HDFS-15451. Do not discard non-initial block report for provided storage. (#2119). Contributed by Shanyu Zhao.

2020-07-06 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new d787b93  HDFS-15451. Do not discard non-initial block report for 
provided storage. (#2119). Contributed by Shanyu Zhao.
d787b93 is described below

commit d787b9373a3302c704da4e9bc6e8549b523c9bed
Author: Shanyu Zhao 
AuthorDate: Mon Jul 6 08:43:34 2020 -0700

HDFS-15451. Do not discard non-initial block report for provided storage. 
(#2119). Contributed by Shanyu Zhao.

Signed-off-by: He Xiaoqiao 
---
 .../hdfs/server/blockmanagement/BlockManager.java  |  1 +
 .../server/blockmanagement/TestBlockManager.java   | 53 ++
 2 files changed, 54 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 17d5603..b80e58f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2689,6 +2689,7 @@ public class BlockManager implements BlockStatsMXBean {
 storageInfo = node.updateStorage(storage);
   }
   if (namesystem.isInStartupSafeMode()
+  && !StorageType.PROVIDED.equals(storageInfo.getStorageType())
   && storageInfo.getBlockReportCount() > 0) {
 blockLog.info("BLOCK* processReport 0x{}: "
 + "discarded non-initial block report from {}"
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 623ab1a..6adbb95 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -47,9 +47,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@@ -1003,6 +1005,57 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testSafeModeWithProvidedStorageBR() throws Exception {
+DatanodeDescriptor node0 = spy(nodes.get(0));
+DatanodeStorageInfo ds0 = node0.getStorageInfos()[0];
+node0.setAlive(true);
+DatanodeDescriptor node1 = spy(nodes.get(1));
+DatanodeStorageInfo ds1 = node1.getStorageInfos()[0];
+node1.setAlive(true);
+
+String providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT;
+DatanodeStorage providedStorage = new DatanodeStorage(
+providedStorageID, DatanodeStorage.State.NORMAL, StorageType.PROVIDED);
+
+// create block manager with provided storage enabled
+Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
+TestProvidedImpl.TestFileRegionBlockAliasMap.class,
+BlockAliasMap.class);
+BlockManager bmPs = new BlockManager(fsn, false, conf);
+bmPs.setBlockPoolId("BP-12344-10.1.1.2-12344");
+
+// pretend to be in safemode
+doReturn(true).when(fsn).isInStartupSafeMode();
+
+// register new node
+DatanodeRegistration nodeReg0 =
+new DatanodeRegistration(node0, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg0);
+bmPs.getDatanodeManager().addDatanode(node0);
+DatanodeRegistration nodeReg1 =
+new DatanodeRegistration(node1, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg1);
+bmPs.getDatanodeManager().addDatanode(node1);
+
+// process reports of provided storage and disk storage
+bmPs.processReport(node0, providedStorage, BlockListAsLongs.EMPTY, null);
+bmPs.processReport(node0, new DatanodeStorage(ds0.getStorageID()),
+BlockListAsLongs.EMPTY, null);
+bmPs

[hadoop] branch branch-3.2 updated: HDFS-15451. Do not discard non-initial block report for provided storage. (#2119). Contributed by Shanyu Zhao.

2020-07-06 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 8d40288  HDFS-15451. Do not discard non-initial block report for 
provided storage. (#2119). Contributed by Shanyu Zhao.
8d40288 is described below

commit 8d4028864d0492e8f64f94db2339fbd55f97083a
Author: Shanyu Zhao 
AuthorDate: Mon Jul 6 08:43:34 2020 -0700

HDFS-15451. Do not discard non-initial block report for provided storage. 
(#2119). Contributed by Shanyu Zhao.

Signed-off-by: He Xiaoqiao 
---
 .../hdfs/server/blockmanagement/BlockManager.java  |  1 +
 .../server/blockmanagement/TestBlockManager.java   | 53 ++
 2 files changed, 54 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3185f1d..4486e47 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2702,6 +2702,7 @@ public class BlockManager implements BlockStatsMXBean {
 storageInfo = node.updateStorage(storage);
   }
   if (namesystem.isInStartupSafeMode()
+  && !StorageType.PROVIDED.equals(storageInfo.getStorageType())
   && storageInfo.getBlockReportCount() > 0) {
 blockLog.info("BLOCK* processReport 0x{}: "
 + "discarded non-initial block report from {}"
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index d393eaa..fea80e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -46,9 +46,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@@ -1002,6 +1004,57 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testSafeModeWithProvidedStorageBR() throws Exception {
+DatanodeDescriptor node0 = spy(nodes.get(0));
+DatanodeStorageInfo ds0 = node0.getStorageInfos()[0];
+node0.setAlive(true);
+DatanodeDescriptor node1 = spy(nodes.get(1));
+DatanodeStorageInfo ds1 = node1.getStorageInfos()[0];
+node1.setAlive(true);
+
+String providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT;
+DatanodeStorage providedStorage = new DatanodeStorage(
+providedStorageID, DatanodeStorage.State.NORMAL, StorageType.PROVIDED);
+
+// create block manager with provided storage enabled
+Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
+TestProvidedImpl.TestFileRegionBlockAliasMap.class,
+BlockAliasMap.class);
+BlockManager bmPs = new BlockManager(fsn, false, conf);
+bmPs.setBlockPoolId("BP-12344-10.1.1.2-12344");
+
+// pretend to be in safemode
+doReturn(true).when(fsn).isInStartupSafeMode();
+
+// register new node
+DatanodeRegistration nodeReg0 =
+new DatanodeRegistration(node0, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg0);
+bmPs.getDatanodeManager().addDatanode(node0);
+DatanodeRegistration nodeReg1 =
+new DatanodeRegistration(node1, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg1);
+bmPs.getDatanodeManager().addDatanode(node1);
+
+// process reports of provided storage and disk storage
+bmPs.processReport(node0, providedStorage, BlockListAsLongs.EMPTY, null);
+bmPs.processReport(node0, new DatanodeStorage(ds0.getStorageID()),
+BlockListAsLongs.EMPTY, null);
+bmPs

[hadoop] branch branch-3.3 updated: HDFS-15451. Do not discard non-initial block report for provided storage. (#2119). Contributed by Shanyu Zhao.

2020-07-06 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 10c9df1  HDFS-15451. Do not discard non-initial block report for 
provided storage. (#2119). Contributed by Shanyu Zhao.
10c9df1 is described below

commit 10c9df1d0a75b4f8d2736bf09cce67ff22b56ded
Author: Shanyu Zhao 
AuthorDate: Mon Jul 6 08:43:34 2020 -0700

HDFS-15451. Do not discard non-initial block report for provided storage. 
(#2119). Contributed by Shanyu Zhao.

Signed-off-by: He Xiaoqiao 
---
 .../hdfs/server/blockmanagement/BlockManager.java  |  1 +
 .../server/blockmanagement/TestBlockManager.java   | 53 ++
 2 files changed, 54 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ad61c71..e3da9a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2745,6 +2745,7 @@ public class BlockManager implements BlockStatsMXBean {
 storageInfo = node.updateStorage(storage);
   }
   if (namesystem.isInStartupSafeMode()
+  && !StorageType.PROVIDED.equals(storageInfo.getStorageType())
   && storageInfo.getBlockReportCount() > 0) {
 blockLog.info("BLOCK* processReport 0x{}: "
 + "discarded non-initial block report from {}"
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 11ed5ba..695377a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -49,9 +49,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestProvidedImpl;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@@ -1052,6 +1054,57 @@ public class TestBlockManager {
   }
 
   @Test
+  public void testSafeModeWithProvidedStorageBR() throws Exception {
+DatanodeDescriptor node0 = spy(nodes.get(0));
+DatanodeStorageInfo ds0 = node0.getStorageInfos()[0];
+node0.setAlive(true);
+DatanodeDescriptor node1 = spy(nodes.get(1));
+DatanodeStorageInfo ds1 = node1.getStorageInfos()[0];
+node1.setAlive(true);
+
+String providedStorageID = DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT;
+DatanodeStorage providedStorage = new DatanodeStorage(
+providedStorageID, DatanodeStorage.State.NORMAL, StorageType.PROVIDED);
+
+// create block manager with provided storage enabled
+Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_PROVIDED_ENABLED, true);
+conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
+TestProvidedImpl.TestFileRegionBlockAliasMap.class,
+BlockAliasMap.class);
+BlockManager bmPs = new BlockManager(fsn, false, conf);
+bmPs.setBlockPoolId("BP-12344-10.1.1.2-12344");
+
+// pretend to be in safemode
+doReturn(true).when(fsn).isInStartupSafeMode();
+
+// register new node
+DatanodeRegistration nodeReg0 =
+new DatanodeRegistration(node0, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg0);
+bmPs.getDatanodeManager().addDatanode(node0);
+DatanodeRegistration nodeReg1 =
+new DatanodeRegistration(node1, null, null, "");
+bmPs.getDatanodeManager().registerDatanode(nodeReg1);
+bmPs.getDatanodeManager().addDatanode(node1);
+
+// process reports of provided storage and disk storage
+bmPs.processReport(node0, providedStorage, BlockListAsLongs.EMPTY, null);
+bmPs.processReport(node0, new DatanodeStorage(ds0.getStorageID()),
+BlockListAsLongs.EMPTY, null);
+bmPs