[hadoop] branch trunk updated: HDFS-16440. RBF: Support router get HAServiceStatus with Lifeline RPC address (#3971)

2022-02-15 Thread inigoiri
This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 48bef28  HDFS-16440. RBF: Support router get HAServiceStatus with Lifeline RPC address (#3971)
48bef28 is described below

commit 48bef285a85e311b52335c8687c6e9a5f8afdf70
Author: YulongZ 
AuthorDate: Wed Feb 16 00:44:17 2022 +0800

HDFS-16440. RBF: Support router get HAServiceStatus with Lifeline RPC address (#3971)
---
 .../router/NamenodeHeartbeatService.java   | 10 +++-
 .../router/TestRouterNamenodeHeartbeat.java| 56 --
 2 files changed, 61 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
index 8858341..e1c8a68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
@@ -48,6 +48,8 @@ import org.codehaus.jettison.json.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.classification.VisibleForTesting;
+
 /**
  * The {@link Router} periodically checks the state of a Namenode (usually on
  * the same server) and reports their high availability (HA) state and
@@ -346,7 +348,8 @@ public class NamenodeHeartbeatService extends PeriodicService {
       // Determine if NN is active
       // TODO: dynamic timeout
       if (localTargetHAProtocol == null) {
-        localTargetHAProtocol = localTarget.getProxy(conf, 30*1000);
+        localTargetHAProtocol = localTarget.getHealthMonitorProxy(conf, 30*1000);
+        LOG.debug("Get HA status with address {}", lifelineAddress);
       }
       HAServiceStatus status = localTargetHAProtocol.getServiceStatus();
       report.setHAServiceState(status.getState());
@@ -373,6 +376,11 @@ public class NamenodeHeartbeatService extends PeriodicService {
 return report;
   }
 
+  @VisibleForTesting
+  NNHAServiceTarget getLocalTarget() {
+    return this.localTarget;
+  }
+
   /**
* Get the description of the Namenode to monitor.
* @return Description of the Namenode to monitor.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
index 9bf149c..94f2bae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
@@ -212,6 +212,50 @@ public class TestRouterNamenodeHeartbeat {
   }
 
   @Test
+  public void testNamenodeHeartbeatServiceHAServiceProtocolProxy(){
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, -1, -1, 1003,
+        "host01.test:1000", "host02.test:1000");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, 1001, -1, 1003,
+        "host01.test:1001", "host02.test:1001");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, -1, 1002, 1003,
+        "host01.test:1002", "host02.test:1002");
+    testNamenodeHeartbeatServiceHAServiceProtocol(
+        "test-ns", "nn", 1000, 1001, 1002, 1003,
+        "host01.test:1002", "host02.test:1002");
+  }
+
+  private void testNamenodeHeartbeatServiceHAServiceProtocol(
+      String nsId, String nnId,
+      int rpcPort, int servicePort,
+      int lifelinePort, int webAddressPort,
+      String expected0, String expected1) {
+    Configuration conf = generateNamenodeConfiguration(nsId, nnId,
+        rpcPort, servicePort, lifelinePort, webAddressPort);
+
+    Router testRouter = new Router();
+    testRouter.setConf(conf);
+
+    Collection<NamenodeHeartbeatService> heartbeatServices =
+        testRouter.createNamenodeHeartbeatServices();
+
+    assertEquals(2, heartbeatServices.size());
+
+    Iterator<NamenodeHeartbeatService> iterator = heartbeatServices.iterator();
+    NamenodeHeartbeatService service0 = iterator.next();
+    service0.init(conf);
+    assertNotNull(service0.getLocalTarget());
+    assertEquals(expected0,
+        service0.getLocalTarget().getHealthMonitorAddress().toString());
+
+    NamenodeHeartbeatService service1 = iterator.next();
+    service1.init(conf);
+    assertNotNull(service1.getLocalTarget());
+    assertEquals(expected1,
+        service1.getLocalTarget().getHealthMonitorAddress().toString());
+  }
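
The four test cases above pin down the address-selection order this change relies on: the health-monitor address prefers the lifeline RPC port, then the service RPC port, then the client RPC port. A minimal standalone sketch of that precedence (HealthMonitorAddressDemo and resolve are hypothetical names for illustration, not part of the patch):

    // Sketch of the precedence the test asserts:
    // lifeline > service RPC > client RPC (a port <= 0 means "not configured").
    public class HealthMonitorAddressDemo {
      static String resolve(String host, int rpcPort, int servicePort, int lifelinePort) {
        int port = lifelinePort > 0 ? lifelinePort
            : (servicePort > 0 ? servicePort : rpcPort);
        return host + ":" + port;
      }
      public static void main(String[] args) {
        System.out.println(resolve("host01.test", 1000, -1, -1));     // host01.test:1000
        System.out.println(resolve("host01.test", 1000, 1001, -1));   // host01.test:1001
        System.out.println(resolve("host01.test", 1000, -1, 1002));   // host01.test:1002
        System.out.println(resolve("host01.test", 1000, 1001, 1002)); // host01.test:1002
      }
    }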

[hadoop] branch branch-2.10 updated: HDFS-10650. DFSClient#mkdirs and DFSClient#primitiveMkdir should use default directory permission. Contributed by John Zhuge.

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new cb5af00  HDFS-10650. DFSClient#mkdirs and DFSClient#primitiveMkdir should use default directory permission. Contributed by John Zhuge.
cb5af00 is described below

commit cb5af0012ed94bb8ab63cfdcdfcd1ab17f6660bb
Author: Xiao Chen 
AuthorDate: Thu Jul 28 13:15:02 2016 -0700

HDFS-10650. DFSClient#mkdirs and DFSClient#primitiveMkdir should use default directory permission. Contributed by John Zhuge.
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 14 ++
 .../java/org/apache/hadoop/security/TestPermission.java| 10 ++
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ad4e499..32553fb 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1209,6 +1209,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return permission.applyUMask(dfsClientConf.getUMask());
   }
 
+  private FsPermission applyUMaskDir(FsPermission permission) {
+    if (permission == null) {
+      permission = FsPermission.getDirDefault();
+    }
+    return permission.applyUMask(dfsClientConf.getUMask());
+  }
+
   /**
    * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
    * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
@@ -2458,7 +2465,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*
* @param src The path of the directory being created
* @param permission The permission of the directory being created.
-   * If permission == null, use {@link FsPermission#getDefault()}.
+   * If permission == null, use {@link FsPermission#getDirDefault()}.
* @param createParent create missing parent directory if true
*
* @return True if the operation success.
@@ -2467,7 +2474,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public boolean mkdirs(String src, FsPermission permission,
       boolean createParent) throws IOException {
-    final FsPermission masked = applyUMask(permission);
+    final FsPermission masked = applyUMaskDir(permission);
     return primitiveMkdir(src, masked, createParent);
   }
 
@@ -2488,9 +2495,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       boolean createParent) throws IOException {
     checkOpen();
     if (absPermission == null) {
-      absPermission = applyUMask(null);
+      absPermission = applyUMaskDir(null);
     }
-
     LOG.debug("{}: masked={}", src, absPermission);
     try (TraceScope ignored = tracer.newScope("mkdir")) {
       return namenode.mkdirs(src, absPermission, createParent);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 5e4f693..d3a4956 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -53,6 +53,7 @@ public class TestPermission {
   final private static Path ROOT_PATH = new Path("/data");
   final private static Path CHILD_DIR1 = new Path(ROOT_PATH, "child1");
   final private static Path CHILD_DIR2 = new Path(ROOT_PATH, "child2");
+  final private static Path CHILD_DIR3 = new Path(ROOT_PATH, "child3");
   final private static Path CHILD_FILE1 = new Path(ROOT_PATH, "file1");
   final private static Path CHILD_FILE2 = new Path(ROOT_PATH, "file2");
   final private static Path CHILD_FILE3 = new Path(ROOT_PATH, "file3");
@@ -237,6 +238,9 @@ public class TestPermission {
   
       // following dir/file creations are legal
       nnfs.mkdirs(CHILD_DIR1);
+      status = nnfs.getFileStatus(CHILD_DIR1);
+      assertThat("Expect 755 = 777 (default dir) - 022 (default umask)",
+          status.getPermission().toString(), is("rwxr-xr-x"));
       out = nnfs.create(CHILD_FILE1);
       status = nnfs.getFileStatus(CHILD_FILE1);
       assertTrue(status.getPermission().toString().equals("rw-r--r--"));
@@ -248,6 +252,12 @@ public class TestPermission {
       status = nnfs.getFileStatus(CHILD_FILE1);
       assertTrue(status.getPermission().toString().equals("rwx------"));
 
+      // mkdirs with null permission
+      nnfs.mkdirs(CHILD_DIR3, null);
+      status = nnfs.getFileStatus(CHILD_DIR3);
+      assertThat("Expect 755 = 777 (default dir) - 022 (default umask)",
+          status.getPermission().toString(), is("rwxr-xr-x"));
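
The change comes down to umask arithmetic: directories start from 0777 and files from 0666 before the client umask is applied, so a null-permission mkdirs under the default 022 umask should yield 755 rather than the 644 a file-oriented default would produce. A self-contained illustration (plain Java, independent of the patch):

    // Default-permission arithmetic behind the fix:
    // dirs start at 0777, files at 0666; both are masked with ~umask.
    public class UmaskDemo {
      public static void main(String[] args) {
        int umask = 0022;                                          // common default umask
        System.out.println(Integer.toOctalString(0777 & ~umask));  // 755 (directory)
        System.out.println(Integer.toOctalString(0666 & ~umask));  // 644 (file)
      }
    }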

[hadoop] branch branch-3.3 updated: HDFS-15745. Make DataNodePeerMetrics#LOW_THRESHOLD_MS and MIN_OUTLIER_DETECTION_NODES configurable. Contributed by Haibin Huang. (#3992)

2022-02-15 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 4c57fb4  HDFS-15745. Make DataNodePeerMetrics#LOW_THRESHOLD_MS and MIN_OUTLIER_DETECTION_NODES configurable. Contributed by Haibin Huang. (#3992)
4c57fb4 is described below

commit 4c57fb4d6bb4cfe9b9269dd07b09270a969c34cf
Author: Takanobu Asanuma 
AuthorDate: Wed Feb 16 09:42:43 2022 +0900

HDFS-15745. Make DataNodePeerMetrics#LOW_THRESHOLD_MS and MIN_OUTLIER_DETECTION_NODES configurable. Contributed by Haibin Huang. (#3992)

(cherry picked from commit 1cd96e8dd81b220cc197011e80cd8e298c566e1a)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Reviewed-by: Ayush Saxena 
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 
 .../datanode/metrics/DataNodePeerMetrics.java  | 24 +++---
 .../src/main/resources/hdfs-default.xml| 16 +++
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7196def..dc2bcbe 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -650,6 +650,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long
       DFS_DATANODE_PEER_METRICS_MIN_OUTLIER_DETECTION_SAMPLES_DEFAULT =
       1000;
+  public static final String DFS_DATANODE_MIN_OUTLIER_DETECTION_NODES_KEY =
+      "dfs.datanode.min.outlier.detection.nodes";
+  public static final long DFS_DATANODE_MIN_OUTLIER_DETECTION_NODES_DEFAULT =
+      10L;
+  public static final String DFS_DATANODE_SLOWPEER_LOW_THRESHOLD_MS_KEY =
+      "dfs.datanode.slowpeer.low.threshold.ms";
+  public static final long DFS_DATANODE_SLOWPEER_LOW_THRESHOLD_MS_DEFAULT =
+      5L;
   public static final String DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY =
   "dfs.datanode.min.outlier.detection.disks";
   public static final long DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_DEFAULT =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodePeerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodePeerMetrics.java
index 3c70a23..750e53d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodePeerMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodePeerMetrics.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hdfs.server.datanode.metrics;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.metrics2.MetricsJsonBuilder;
 import org.apache.hadoop.metrics2.lib.MutableRollingAverages;
 import org.slf4j.Logger;
@@ -48,11 +49,6 @@ public class DataNodePeerMetrics {
 
   private final String name;
 
-  /**
-   * Threshold in milliseconds below which a DataNode is definitely not slow.
-   */
-  private static final long LOW_THRESHOLD_MS = 5;
-  private static final long MIN_OUTLIER_DETECTION_NODES = 10;
 
   private final OutlierDetector slowNodeDetector;
 
@@ -62,14 +58,28 @@ public class DataNodePeerMetrics {
* outlier detection is skipped.
*/
   private final long minOutlierDetectionSamples;
+  /**
+   * Threshold in milliseconds below which a DataNode is definitely not slow.
+   */
+  private final long lowThresholdMs;
+  /**
+   * Minimum number of nodes to run outlier detection.
+   */
+  private final long minOutlierDetectionNodes;
 
   public DataNodePeerMetrics(final String name, Configuration conf) {
     this.name = name;
     minOutlierDetectionSamples = conf.getLong(
         DFS_DATANODE_PEER_METRICS_MIN_OUTLIER_DETECTION_SAMPLES_KEY,
         DFS_DATANODE_PEER_METRICS_MIN_OUTLIER_DETECTION_SAMPLES_DEFAULT);
-    this.slowNodeDetector = new OutlierDetector(MIN_OUTLIER_DETECTION_NODES,
-        LOW_THRESHOLD_MS);
+    lowThresholdMs =
+        conf.getLong(DFSConfigKeys.DFS_DATANODE_SLOWPEER_LOW_THRESHOLD_MS_KEY,
+            DFSConfigKeys.DFS_DATANODE_SLOWPEER_LOW_THRESHOLD_MS_DEFAULT);
+    minOutlierDetectionNodes =
+        conf.getLong(DFSConfigKeys.DFS_DATANODE_MIN_OUTLIER_DETECTION_NODES_KEY,
+            DFSConfigKeys.DFS_DATANODE_MIN_OUTLIER_DETECTION_NODES_DEFAULT);
+    this.slowNodeDetector =
+        new OutlierDetector(minOutlierDetectionNodes, lowThresholdMs);
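
For operators, the visible effect is two new tunables read when DataNodePeerMetrics is constructed. A minimal sketch of setting them programmatically (the values are illustrative, not recommendations, and the constructor usage mirrors the hunk above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodePeerMetrics;

    public class PeerMetricsTuningDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Keys introduced by HDFS-15745 (defaults: 10 nodes, 5 ms).
        conf.setLong("dfs.datanode.min.outlier.detection.nodes", 20L);
        conf.setLong("dfs.datanode.slowpeer.low.threshold.ms", 10L);
        DataNodePeerMetrics metrics = new DataNodePeerMetrics("dn-demo", conf);
      }
    }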

[hadoop] branch trunk updated: HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 19d90e6  HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)
19d90e6 is described below

commit 19d90e62fb28539f8c79bbb24f703301489825a6
Author: Chentao Yu 
AuthorDate: Thu Apr 15 17:46:40 2021 -0700

HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)
---
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java   |  5 -
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java  | 19 +++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 5ff3c2b..8c3cdb8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -1666,11 +1666,6 @@ public class ViewFileSystem extends FileSystem {
     }
 
     @Override
-    public boolean mkdirs(Path dir) throws IOException {
-      return mkdirs(dir, null);
-    }
-
-    @Override
     public FSDataInputStream open(Path f, int bufferSize)
         throws AccessControlException, FileNotFoundException, IOException {
       checkPathIsSlash(f);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb5257..fdc7464 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -479,4 +479,23 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     assertEquals("The owner did not match ", owner,
         userUgi.getShortUserName());
     otherfs.delete(user1Path, false);
   }
+
+  @Test
+  public void testInternalDirectoryPermissions() throws IOException {
+    LOG.info("Starting testInternalDirectoryPermissions!");
+    Configuration localConf = new Configuration(conf);
+    ConfigUtil.addLinkFallback(
+        localConf, new Path(targetTestRoot, "fallbackDir").toUri());
+    FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, localConf);
+    // check that the default permissions on a sub-folder of an internal
+    // directory are the same as those created on non-internal directories.
+    Path subDirOfInternalDir = new Path("/internalDir/dir1");
+    fs.mkdirs(subDirOfInternalDir);
+
+    Path subDirOfRealDir = new Path("/internalDir/linkToDir2/dir1");
+    fs.mkdirs(subDirOfRealDir);
+
+    assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(),
+        fs.getFileStatus(subDirOfRealDir).getPermission());
+  }
 }
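
The fix works by deletion: with the override gone, the single-argument mkdirs on internal ViewFS directories falls through to the FileSystem base class, which fills in the directory default permission instead of forwarding null. Paraphrasing the base-class method the code now inherits (not part of this diff):

    // FileSystem#mkdirs(Path), paraphrased: the directory default
    // (0777 before umask) is supplied rather than a null permission.
    public boolean mkdirs(Path f) throws IOException {
      return mkdirs(f, FsPermission.getDirDefault());
    }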




[hadoop] branch branch-3.3 updated: HDFS-16396. Reconfig slow peer parameters for datanode (#3827)

2022-02-15 Thread tasanuma
This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new db67952  HDFS-16396. Reconfig slow peer parameters for datanode (#3827)
db67952 is described below

commit db67952f9f38305ca263c033ed13fe772f79f300
Author: litao 
AuthorDate: Tue Feb 15 12:40:46 2022 +0800

HDFS-16396. Reconfig slow peer parameters for datanode (#3827)

Reviewed-by: Ayush Saxena 
(cherry picked from commit 0c194f2157ff4473ea95dff1d7d40c386398f4a4)
---
 .../hdfs/server/datanode/BPServiceActor.java   |  2 +-
 .../hadoop/hdfs/server/datanode/BlockReceiver.java |  4 +-
 .../apache/hadoop/hdfs/server/datanode/DNConf.java |  6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java  | 70 -
 .../hadoop/hdfs/server/datanode/DataXceiver.java   |  2 +-
 .../datanode/metrics/DataNodePeerMetrics.java  | 57 +++---
 .../server/datanode/metrics/OutlierDetector.java   | 20 -
 .../datanode/TestDataNodeReconfiguration.java  | 87 ++
 .../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java |  2 +-
 9 files changed, 230 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 8f0286c..c8f18fa 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -534,7 +534,7 @@ class BPServiceActor implements Runnable {
         volumeFailureSummary.getFailedStorageLocations().length : 0;
     final boolean outliersReportDue = scheduler.isOutliersReportDue(now);
     final SlowPeerReports slowPeers =
-        outliersReportDue && dn.getPeerMetrics() != null ?
+        outliersReportDue && dnConf.peerStatsEnabled && dn.getPeerMetrics() != null ?
         SlowPeerReports.create(dn.getPeerMetrics().getOutliers()) :
         SlowPeerReports.EMPTY_REPORT;
     final SlowDiskReports slowDisks =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 315914a..e20f437 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -878,7 +878,7 @@ class BlockReceiver implements Closeable {
    */
   private void trackSendPacketToLastNodeInPipeline(final long elapsedMs) {
     final DataNodePeerMetrics peerMetrics = datanode.getPeerMetrics();
-    if (peerMetrics != null && isPenultimateNode) {
+    if (datanode.getDnConf().peerStatsEnabled && peerMetrics != null && isPenultimateNode) {
       peerMetrics.addSendPacketDownstream(mirrorNameForMetrics, elapsedMs);
     }
   }
@@ -1093,7 +1093,7 @@ class BlockReceiver implements Closeable {
     if (downstreams != null && downstreams.length > 0) {
       downstreamDNs = downstreams;
       isPenultimateNode = (downstreams.length == 1);
-      if (isPenultimateNode && datanode.getPeerMetrics() != null) {
+      if (isPenultimateNode && datanode.getDnConf().peerStatsEnabled) {
         mirrorNameForMetrics = (downstreams[0].getInfoSecurePort() != 0 ?
             downstreams[0].getInfoSecureAddr() : downstreams[0].getInfoAddr());
         LOG.debug("Will collect peer metrics for downstream node {}",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 40d0df3..563fbde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -108,7 +108,7 @@ public class DNConf {
   private final long lifelineIntervalMs;
   volatile long blockReportInterval;
   volatile long blockReportSplitThreshold;
-  final boolean peerStatsEnabled;
+  volatile boolean peerStatsEnabled;
   final boolean diskStatsEnabled;
   final long outliersReportIntervalMs;
   final long ibrInterval;
@@ -507,4 +507,8 @@ public class DNConf {
     dn.getConf().set(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, delayMs);
     initBlockReportDelay();
   }
+
+  void setPeerStatsEnabled(boolean enablePeerStats) {
+    peerStatsEnabled = enablePeerStats;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/had
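
The recurring pattern in this patch shows in the DNConf hunk: peerStatsEnabled changes from final to volatile and the hot paths consult the flag directly, so a reconfiguration takes effect without restarting the DataNode. A minimal standalone sketch of that pattern (names are illustrative, not from the patch):

    // The final -> volatile reconfiguration pattern: the handler flips the
    // flag, and hot-path readers always observe the latest value.
    public class ReconfigurableFlagDemo {
      private volatile boolean peerStatsEnabled = false;

      // Invoked by the reconfiguration handler.
      void setPeerStatsEnabled(boolean enabled) {
        peerStatsEnabled = enabled;
      }

      // Invoked on the I/O hot path; a volatile boolean needs no lock.
      void maybeSample(long latencyMs) {
        if (peerStatsEnabled) {
          System.out.println("peer latency sample: " + latencyMs + " ms");
        }
      }
    }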

[hadoop] branch branch-3.3 updated: HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new d14a7c6  HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)
d14a7c6 is described below

commit d14a7c6ee5e881235d18d4cf9241197a1817d745
Author: Chentao Yu 
AuthorDate: Thu Apr 15 17:46:40 2021 -0700

HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)

(cherry picked from commit 19d90e62fb28539f8c79bbb24f703301489825a6)
---
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java   |  5 -
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java  | 19 +++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index 7503edd..8f333d1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -1580,11 +1580,6 @@ public class ViewFileSystem extends FileSystem {
     }
 
     @Override
-    public boolean mkdirs(Path dir) throws IOException {
-      return mkdirs(dir, null);
-    }
-
-    @Override
     public FSDataInputStream open(Path f, int bufferSize)
         throws AccessControlException, FileNotFoundException, IOException {
       checkPathIsSlash(f);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb5257..fdc7464 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -479,4 +479,23 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     assertEquals("The owner did not match ", owner,
         userUgi.getShortUserName());
     otherfs.delete(user1Path, false);
   }
+
+  @Test
+  public void testInternalDirectoryPermissions() throws IOException {
+    LOG.info("Starting testInternalDirectoryPermissions!");
+    Configuration localConf = new Configuration(conf);
+    ConfigUtil.addLinkFallback(
+        localConf, new Path(targetTestRoot, "fallbackDir").toUri());
+    FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, localConf);
+    // check that the default permissions on a sub-folder of an internal
+    // directory are the same as those created on non-internal directories.
+    Path subDirOfInternalDir = new Path("/internalDir/dir1");
+    fs.mkdirs(subDirOfInternalDir);
+
+    Path subDirOfRealDir = new Path("/internalDir/linkToDir2/dir1");
+    fs.mkdirs(subDirOfRealDir);
+
+    assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(),
+        fs.getFileStatus(subDirOfRealDir).getPermission());
+  }
 }




[hadoop] branch branch-3.2 updated: HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)

2022-02-15 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 5066722  HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)
5066722 is described below

commit 5066722eb70b82a532b55ce98a2623852fd195c8
Author: Chentao Yu 
AuthorDate: Thu Apr 15 17:46:40 2021 -0700

HADOOP-18109. Ensure that default permissions of directories under internal ViewFS directories are the same as directories on target filesystems. Contributed by Chentao Yu. (3953)

(cherry picked from commit 19d90e62fb28539f8c79bbb24f703301489825a6)
---
 .../org/apache/hadoop/fs/viewfs/ViewFileSystem.java   |  5 -
 .../hadoop/fs/viewfs/TestViewFileSystemHdfs.java  | 19 +++
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index a430727..0ff2e73 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -1496,11 +1496,6 @@ public class ViewFileSystem extends FileSystem {
     }
 
     @Override
-    public boolean mkdirs(Path dir) throws IOException {
-      return mkdirs(dir, null);
-    }
-
-    @Override
     public FSDataInputStream open(Path f, int bufferSize)
         throws AccessControlException, FileNotFoundException, IOException {
       checkPathIsSlash(f);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index fcb5257..fdc7464 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -479,4 +479,23 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     assertEquals("The owner did not match ", owner,
         userUgi.getShortUserName());
     otherfs.delete(user1Path, false);
   }
+
+  @Test
+  public void testInternalDirectoryPermissions() throws IOException {
+    LOG.info("Starting testInternalDirectoryPermissions!");
+    Configuration localConf = new Configuration(conf);
+    ConfigUtil.addLinkFallback(
+        localConf, new Path(targetTestRoot, "fallbackDir").toUri());
+    FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, localConf);
+    // check that the default permissions on a sub-folder of an internal
+    // directory are the same as those created on non-internal directories.
+    Path subDirOfInternalDir = new Path("/internalDir/dir1");
+    fs.mkdirs(subDirOfInternalDir);
+
+    Path subDirOfRealDir = new Path("/internalDir/linkToDir2/dir1");
+    fs.mkdirs(subDirOfRealDir);
+
+    assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(),
+        fs.getFileStatus(subDirOfRealDir).getPermission());
+  }
 }
