This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 1030ac8  HDFS-14235. Handle ArrayIndexOutOfBoundsException in DataNodeDiskMetrics#slowDiskDetectionDaemon. Contributed by Ranith Sardar.
1030ac8 is described below

commit 1030ac85bbfa44e775c03ff17e067d1087d7a475
Author: Surendra Singh Lilhore <surendralilh...@apache.org>
AuthorDate: Wed Feb 20 16:56:10 2019 +0530

    HDFS-14235. Handle ArrayIndexOutOfBoundsException in DataNodeDiskMetrics#slowDiskDetectionDaemon. Contributed by Ranith Sardar.
    
    (cherry picked from commit 41e18feda3f5ff924c87c4bed5b5cbbaecb19ae1)
    (cherry picked from commit b93b127956508072904b44098fdc1c0dfc899606)
---
 .../datanode/metrics/DataNodeDiskMetrics.java      | 78 ++++++++++++----------
 1 file changed, 43 insertions(+), 35 deletions(-)
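
The removed lines in the hunk below show the root cause: the detection loop
checked hasNext() once per iteration but called volumeIterator.next() twice,
so the second call could run past the end of the volume collection and
surface as the ArrayIndexOutOfBoundsException named in the JIRA title. A
minimal, self-contained Java sketch of the faulty pattern and the
one-advance fix (hypothetical names, not the Hadoop API):

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class DoubleNextSketch {
      public static void main(String[] args) {
        List<String> volumes = Arrays.asList("/data1", "/data2", "/data3");

        // Faulty pattern: hasNext() is checked once, but next() is called
        // twice, so the iterator advances two elements per round. With an
        // odd number of volumes the second next() overruns the end (a
        // NoSuchElementException here; the DataNode's volume iterator
        // surfaced it as ArrayIndexOutOfBoundsException).
        Iterator<String> it = volumes.iterator();
        try {
          while (it.hasNext()) {
            String volume = it.next();
            String metrics = it.next(); // BUG: second advance per check
            System.out.println(volume + " -> " + metrics);
          }
        } catch (RuntimeException e) {
          System.out.println("iterator overrun: " + e);
        }

        // Fixed pattern, as in the patch: advance once and reuse the same
        // element for both the volume name and the metrics lookup.
        for (String volume : volumes) {
          System.out.println("visited exactly once: " + volume);
        }
      }
    }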

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
index f2954e8..a8a6c85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
@@ -57,6 +57,10 @@ public class DataNodeDiskMetrics {
   private volatile Map<String, Map<DiskOp, Double>>
       diskOutliersStats = Maps.newHashMap();
 
+  // Added for testing: when addSlowDiskForTesting() is called from test
+  // code, the status should not be overridden by the daemon thread.
+  private boolean overrideStatus = true;
+
   public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs) {
     this.dn = dn;
     this.detectionInterval = diskOutlierDetectionIntervalMs;
@@ -71,41 +75,43 @@ public class DataNodeDiskMetrics {
       @Override
       public void run() {
         while (shouldRun) {
-          Map<String, Double> metadataOpStats = Maps.newHashMap();
-          Map<String, Double> readIoStats = Maps.newHashMap();
-          Map<String, Double> writeIoStats = Maps.newHashMap();
-          FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
-          try {
-            fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
-            Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences
-                .iterator();
-            while (volumeIterator.hasNext()) {
-              FsVolumeSpi volume = volumeIterator.next();
-              DataNodeVolumeMetrics metrics = volumeIterator.next().getMetrics();
-              String volumeName = volume.getBaseURI().getPath();
-
-              metadataOpStats.put(volumeName,
-                  metrics.getMetadataOperationMean());
-              readIoStats.put(volumeName, metrics.getReadIoMean());
-              writeIoStats.put(volumeName, metrics.getWriteIoMean());
-            }
-          } finally {
-            if (fsVolumeReferences != null) {
-              try {
-                fsVolumeReferences.close();
-              } catch (IOException e) {
-                LOG.error("Error in releasing FS Volume references", e);
+          if (dn.getFSDataset() != null) {
+            Map<String, Double> metadataOpStats = Maps.newHashMap();
+            Map<String, Double> readIoStats = Maps.newHashMap();
+            Map<String, Double> writeIoStats = Maps.newHashMap();
+            FsDatasetSpi.FsVolumeReferences fsVolumeReferences = null;
+            try {
+              fsVolumeReferences = dn.getFSDataset().getFsVolumeReferences();
+              Iterator<FsVolumeSpi> volumeIterator = fsVolumeReferences
+                  .iterator();
+              while (volumeIterator.hasNext()) {
+                FsVolumeSpi volume = volumeIterator.next();
+                DataNodeVolumeMetrics metrics = volume.getMetrics();
+                String volumeName = volume.getBaseURI().getPath();
+
+                metadataOpStats.put(volumeName,
+                    metrics.getMetadataOperationMean());
+                readIoStats.put(volumeName, metrics.getReadIoMean());
+                writeIoStats.put(volumeName, metrics.getWriteIoMean());
+              }
+            } finally {
+              if (fsVolumeReferences != null) {
+                try {
+                  fsVolumeReferences.close();
+                } catch (IOException e) {
+                  LOG.error("Error in releasing FS Volume references", e);
+                }
               }
             }
-          }
-          if (metadataOpStats.isEmpty() && readIoStats.isEmpty() &&
-              writeIoStats.isEmpty()) {
-            LOG.debug("No disk stats available for detecting outliers.");
-            return;
-          }
+            if (metadataOpStats.isEmpty() && readIoStats.isEmpty()
+                && writeIoStats.isEmpty()) {
+              LOG.debug("No disk stats available for detecting outliers.");
+              continue;
+            }
 
-          detectAndUpdateDiskOutliers(metadataOpStats, readIoStats,
-              writeIoStats);
+            detectAndUpdateDiskOutliers(metadataOpStats, readIoStats,
+                writeIoStats);
+          }
 
           try {
             Thread.sleep(detectionInterval);
@@ -143,9 +149,10 @@ public class DataNodeDiskMetrics {
     for (Map.Entry<String, Double> entry : writeIoOutliers.entrySet()) {
       addDiskStat(diskStats, entry.getKey(), DiskOp.WRITE, entry.getValue());
     }
-
-    diskOutliersStats = diskStats;
-    LOG.debug("Updated disk outliers.");
+    if (overrideStatus) {
+      diskOutliersStats = diskStats;
+      LOG.debug("Updated disk outliers.");
+    }
   }
 
   private void addDiskStat(Map<String, Map<DiskOp, Double>> diskStats,
@@ -176,6 +183,7 @@ public class DataNodeDiskMetrics {
   @VisibleForTesting
   public void addSlowDiskForTesting(String slowDiskPath,
       Map<DiskOp, Double> latencies) {
+    overrideStatus = false;
     if (latencies == null) {
       diskOutliersStats.put(slowDiskPath, ImmutableMap.of());
     } else {
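
Beyond the iterator fix, the patch hardens the detection daemon: it guards
against dn.getFSDataset() returning null during startup or shutdown, and it
replaces return with continue when no disk stats are available yet, so a
single empty sample no longer terminates the thread for the life of the
process. A generic sketch of that loop shape (hypothetical names, not the
Hadoop API; this sketch sleeps on every pass so the empty-sample path
cannot spin):

    import java.util.Collections;
    import java.util.Map;

    public class OutlierDaemonSketch implements Runnable {
      private volatile boolean shouldRun = true;
      private final long detectionIntervalMs;

      public OutlierDaemonSketch(long detectionIntervalMs) {
        this.detectionIntervalMs = detectionIntervalMs;
      }

      // Hypothetical stand-in for dn.getFSDataset(); it may legitimately
      // return nothing while the service is starting up or shutting down.
      private Map<String, Double> sampleStats() {
        return Collections.emptyMap();
      }

      @Override
      public void run() {
        while (shouldRun) {
          Map<String, Double> stats = sampleStats();
          if (stats.isEmpty()) {
            // Skip this round but keep the thread alive; a 'return' here
            // would silently disable outlier detection forever.
            System.out.println("no stats available, retrying next round");
          } else {
            process(stats);
          }
          try {
            Thread.sleep(detectionIntervalMs);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            shouldRun = false;
          }
        }
      }

      private void process(Map<String, Double> stats) {
        // detect and publish outliers here
      }
    }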


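The new overrideStatus flag is a test seam: once addSlowDiskForTesting()
injects a synthetic slow disk, the daemon stops publishing its own results
so the injected value is not clobbered. A minimal sketch of that handoff
(hypothetical names; the flag is marked volatile in this sketch because it
is written by the test thread and read by the daemon thread):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class TestSeamSketch {
      // Written by the test thread, read by the daemon thread.
      private volatile boolean overrideStatus = true;

      private volatile Map<String, Double> outliers =
          new ConcurrentHashMap<>();

      // Called by the daemon each detection round.
      void publish(Map<String, Double> freshStats) {
        if (overrideStatus) {
          outliers = freshStats;
        }
        // Otherwise a test has injected data; leave it in place.
      }

      // Called only from tests: inject a value and freeze publication.
      void addSlowDiskForTesting(String path, double latency) {
        overrideStatus = false;
        outliers.put(path, latency);
      }
    }
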
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
