anujmodi2021 commented on code in PR #8137:
URL: https://github.com/apache/hadoop/pull/8137#discussion_r2648019050


##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java:
##########
@@ -201,20 +201,39 @@ public void initializeWriteResourceUtilizationMetrics() {
 
 
   @Override
-  public void initializeMetrics(MetricFormat metricFormat) {
+  public void initializeMetrics(final MetricFormat metricFormat,
+      final AbfsConfiguration abfsConfiguration) {
     switch (metricFormat) {
-      case INTERNAL_BACKOFF_METRIC_FORMAT:
-        abfsBackoffMetrics = new AbfsBackoffMetrics();
-        break;
-      case INTERNAL_FOOTER_METRIC_FORMAT:
-        abfsReadFooterMetrics = new AbfsReadFooterMetrics();
-        break;
-      case INTERNAL_METRIC_FORMAT:
-        abfsBackoffMetrics = new AbfsBackoffMetrics();
-        abfsReadFooterMetrics = new AbfsReadFooterMetrics();
-        break;
-      default:
-        break;
+    case INTERNAL_BACKOFF_METRIC_FORMAT:
+      abfsBackoffMetrics = new AbfsBackoffMetrics(
+          abfsConfiguration.isBackoffRetryMetricsEnabled());
+      break;
+    case INTERNAL_FOOTER_METRIC_FORMAT:
+      initializeReadFooterMetrics();
+    case INTERNAL_METRIC_FORMAT:
+      abfsBackoffMetrics = new AbfsBackoffMetrics(
+          abfsConfiguration.isBackoffRetryMetricsEnabled());
+      initializeReadFooterMetrics();
+      break;
+    default:
+      break;
+    }
+  }
+
+  /**
+   * Initialize the read footer metrics.
+   * In case the metrics are already initialized,
+   * create a new instance with the existing map.
+   */
+  private void initializeReadFooterMetrics() {
+    if (abfsReadFooterMetrics == null) {
+      abfsReadFooterMetrics = new AbfsReadFooterMetrics();
+    } else {
+      //In case metrics is emitted based on total count, there could be a chance
+      // that file type for which we have calculated the type will be lost.
+      // To avoid that, creating a new instance with existing map.
+      abfsReadFooterMetrics = new AbfsReadFooterMetrics(

Review Comment:
   Not able to get this.
   How can the file type be lost? It must be a part of the AbfsReadFooterMetrics object only.
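   
   For reference, a simplified view of where that type lives, taken from the AbfsReadFooterMetrics excerpt below (only the relevant fields shown):
   ```java
   // Simplified sketch from the AbfsReadFooterMetrics excerpt in this PR:
   // the inferred file type is held per file inside the map owned by the
   // AbfsReadFooterMetrics instance itself.
   public class AbfsReadFooterMetrics extends AbstractAbfsStatisticsSource {
     // one FileTypeMetrics entry per file
     private Map<String, FileTypeMetrics> fileTypeMetricsMap =
         new ConcurrentHashMap<>();

     private static final class FileTypeMetrics {
       private FileType fileType = null; // inferred as PARQUET or NON_PARQUET
       // ... counters and flags omitted ...
     }
   }
   ```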



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsReadFooterMetrics.java:
##########
@@ -62,476 +63,536 @@
 * This class is responsible for tracking and updating metrics related to reading footers in files.
  */
 public class AbfsReadFooterMetrics extends AbstractAbfsStatisticsSource {
-    private static final Logger LOG = LoggerFactory.getLogger(AbfsReadFooterMetrics.class);
-    private static final String FOOTER_LENGTH = "20";
-    private static final List<FileType> FILE_TYPE_LIST =
-            Arrays.asList(FileType.values());
-    private final Map<String, FileTypeMetrics> fileTypeMetricsMap =
-            new ConcurrentHashMap<>();
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      AbfsReadFooterMetrics.class);
+

Review Comment:
   Nit: Empty line changes can be reverted.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsReadFooterMetrics.java:
##########
@@ -62,476 +63,536 @@
 * This class is responsible for tracking and updating metrics related to reading footers in files.
  */
 public class AbfsReadFooterMetrics extends AbstractAbfsStatisticsSource {
-    private static final Logger LOG = LoggerFactory.getLogger(AbfsReadFooterMetrics.class);
-    private static final String FOOTER_LENGTH = "20";
-    private static final List<FileType> FILE_TYPE_LIST =
-            Arrays.asList(FileType.values());
-    private final Map<String, FileTypeMetrics> fileTypeMetricsMap =
-            new ConcurrentHashMap<>();
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      AbfsReadFooterMetrics.class);
+
+  private static final String FOOTER_LENGTH = "20";
+
+  private static final List<FileType> FILE_TYPE_LIST =
+      Arrays.asList(FileType.values());
+
+  private Map<String, FileTypeMetrics> fileTypeMetricsMap =
+      new ConcurrentHashMap<>();
+
+  /**
+   * Inner class to handle file type checks.
+   */
+  private static final class FileTypeMetrics {
+
+    private final AtomicBoolean collectMetrics;
+
+    private final AtomicBoolean collectMetricsForNextRead;
+
+    private final AtomicBoolean collectLenMetrics;
+
+    private final AtomicLong readCount;
+
+    private final AtomicLong offsetOfFirstRead;
+
+    private FileType fileType = null;
+
+    private String sizeReadByFirstRead;
+
+    private String offsetDiffBetweenFirstAndSecondRead;
 
     /**
-     * Inner class to handle file type checks.
+     * Constructor to initialize the file type metrics.
      */
-    private static final class FileTypeMetrics {
-        private final AtomicBoolean collectMetrics;
-        private final AtomicBoolean collectMetricsForNextRead;
-        private final AtomicBoolean collectLenMetrics;
-        private final AtomicLong readCount;
-        private final AtomicLong offsetOfFirstRead;
-        private FileType fileType = null;
-        private String sizeReadByFirstRead;
-        private String offsetDiffBetweenFirstAndSecondRead;
-
-        /**
-         * Constructor to initialize the file type metrics.
-         */
-        private FileTypeMetrics() {
-            collectMetrics = new AtomicBoolean(false);
-            collectMetricsForNextRead = new AtomicBoolean(false);
-            collectLenMetrics = new AtomicBoolean(false);
-            readCount = new AtomicLong(0);
-            offsetOfFirstRead = new AtomicLong(0);
-        }
-
-        /**
-         * Updates the file type based on the metrics collected.
-         */
-        private void updateFileType() {
-            if (fileType == null) {
-                fileType = collectMetrics.get() && readCount.get() >= 2
-                        && haveEqualValues(sizeReadByFirstRead)
-                        && haveEqualValues(offsetDiffBetweenFirstAndSecondRead) ? PARQUET : NON_PARQUET;
-            }
-        }
-
-        /**
-         * Checks if the given value has equal parts.
-         *
-         * @param value the value to check
-         * @return true if the value has equal parts, false otherwise
-         */
-        private boolean haveEqualValues(String value) {
-            String[] parts = value.split("_");
-            return parts.length == 2
-                    && parts[0].equals(parts[1]);
-        }
-
-        /**
-         * Increments the read count.
-         */
-        private void incrementReadCount() {
-            readCount.incrementAndGet();
-        }
-
-        /**
-         * Returns the read count.
-         *
-         * @return the read count
-         */
-        private long getReadCount() {
-            return readCount.get();
-        }
-
-        /**
-         * Sets the collect metrics flag.
-         *
-         * @param collect the value to set
-         */
-        private void setCollectMetrics(boolean collect) {
-            collectMetrics.set(collect);
-        }
-
-        /**
-         * Returns the collect metrics flag.
-         *
-         * @return the collect metrics flag
-         */
-        private boolean getCollectMetrics() {
-            return collectMetrics.get();
-        }
-
-        /**
-         * Sets the collect metrics for the next read flag.
-         *
-         * @param collect the value to set
-         */
-        private void setCollectMetricsForNextRead(boolean collect) {
-            collectMetricsForNextRead.set(collect);
-        }
-
-        /**
-         * Returns the collect metrics for the next read flag.
-         *
-         * @return the collect metrics for the next read flag
-         */
-        private boolean getCollectMetricsForNextRead() {
-            return collectMetricsForNextRead.get();
-        }
-
-        /**
-         * Returns the collect length metrics flag.
-         *
-         * @return the collect length metrics flag
-         */
-        private boolean getCollectLenMetrics() {
-            return collectLenMetrics.get();
-        }
-
-        /**
-         * Sets the collect length metrics flag.
-         *
-         * @param collect the value to set
-         */
-        private void setCollectLenMetrics(boolean collect) {
-            collectLenMetrics.set(collect);
-        }
-
-        /**
-         * Sets the offset of the first read.
-         *
-         * @param offset the value to set
-         */
-        private void setOffsetOfFirstRead(long offset) {
-            offsetOfFirstRead.set(offset);
-        }
-
-        /**
-         * Returns the offset of the first read.
-         *
-         * @return the offset of the first read
-         */
-        private long getOffsetOfFirstRead() {
-            return offsetOfFirstRead.get();
-        }
-
-        /**
-         * Sets the size read by the first read.
-         *
-         * @param size the value to set
-         */
-        private void setSizeReadByFirstRead(String size) {
-            sizeReadByFirstRead = size;
-        }
-
-        /**
-         * Returns the size read by the first read.
-         *
-         * @return the size read by the first read
-         */
-        private String getSizeReadByFirstRead() {
-            return sizeReadByFirstRead;
-        }
-
-        /**
-         * Sets the offset difference between the first and second read.
-         *
-         * @param offsetDiff the value to set
-         */
-        private void setOffsetDiffBetweenFirstAndSecondRead(String offsetDiff) {
-            offsetDiffBetweenFirstAndSecondRead = offsetDiff;
-        }
-
-        /**
-         * Returns the offset difference between the first and second read.
-         *
-         * @return the offset difference between the first and second read
-         */
-        private String getOffsetDiffBetweenFirstAndSecondRead() {
-            return offsetDiffBetweenFirstAndSecondRead;
-        }
-
-        /**
-         * Returns the file type.
-         *
-         * @return the file type
-         */
-        private FileType getFileType() {
-            return fileType;
-        }
+    private FileTypeMetrics() {
+      collectMetrics = new AtomicBoolean(false);
+      collectMetricsForNextRead = new AtomicBoolean(false);
+      collectLenMetrics = new AtomicBoolean(false);
+      readCount = new AtomicLong(0);
+      offsetOfFirstRead = new AtomicLong(0);
     }
 
     /**
-     * Constructor to initialize the IOStatisticsStore with counters and mean statistics.
+     * Updates the file type based on the metrics collected.

Review Comment:
   Explain in the javadoc what logic is used to infer the file type.
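   
   Something along these lines would do (the javadoc wording is only a suggestion derived from the existing logic; the method body is unchanged from the old code above):
   ```java
    /**
     * Updates the file type based on the metrics collected.
     *
     * A file is classified as PARQUET when metrics collection is enabled
     * for it, at least two reads have been observed, and both the size read
     * by the first read and the offset difference between the first and
     * second read are underscore-separated pairs with equal halves;
     * otherwise it is classified as NON_PARQUET.
     */
    private void updateFileType() {
      if (fileType == null) {
        fileType = collectMetrics.get() && readCount.get() >= 2
            && haveEqualValues(sizeReadByFirstRead)
            && haveEqualValues(offsetDiffBetweenFirstAndSecondRead)
            ? PARQUET : NON_PARQUET;
      }
    }
   ```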



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/utils/TracingHeaderVersion.java:
##########
@@ -47,7 +47,11 @@ public enum TracingHeaderVersion {
   *         :position:operatedBlobCount:operationSpecificHeader:httpOperationHeader
    *         :aggregatedMetrics:resourceUtilizationMetrics
    */
-  V2("v2", 15);
+  V2("v2", 15),
+  /**
+   * Metrics to client request id header.
+   */
+  AV0("av0", 3);

Review Comment:
   Add a comment listing which 3 fields will be there, as done in the javadoc of the other versions.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java:
##########
@@ -201,20 +201,39 @@ public void initializeWriteResourceUtilizationMetrics() {
 
 
   @Override
-  public void initializeMetrics(MetricFormat metricFormat) {
+  public void initializeMetrics(final MetricFormat metricFormat,
+      final AbfsConfiguration abfsConfiguration) {
     switch (metricFormat) {
-      case INTERNAL_BACKOFF_METRIC_FORMAT:
-        abfsBackoffMetrics = new AbfsBackoffMetrics();
-        break;
-      case INTERNAL_FOOTER_METRIC_FORMAT:
-        abfsReadFooterMetrics = new AbfsReadFooterMetrics();
-        break;
-      case INTERNAL_METRIC_FORMAT:
-        abfsBackoffMetrics = new AbfsBackoffMetrics();
-        abfsReadFooterMetrics = new AbfsReadFooterMetrics();
-        break;
-      default:
-        break;
+    case INTERNAL_BACKOFF_METRIC_FORMAT:
+      abfsBackoffMetrics = new AbfsBackoffMetrics(
+          abfsConfiguration.isBackoffRetryMetricsEnabled());
+      break;
+    case INTERNAL_FOOTER_METRIC_FORMAT:
+      initializeReadFooterMetrics();
+    case INTERNAL_METRIC_FORMAT:
+      abfsBackoffMetrics = new AbfsBackoffMetrics(
+          abfsConfiguration.isBackoffRetryMetricsEnabled());
+      initializeReadFooterMetrics();
+      break;
+    default:
+      break;
+    }
+  }
+
+  /**
+   * Initialize the read footer metrics.
+   * In case the metrics are already initialized,
+   * create a new instance with the existing map.
+   */
+  private void initializeReadFooterMetrics() {
+    if (abfsReadFooterMetrics == null) {
+      abfsReadFooterMetrics = new AbfsReadFooterMetrics();
+    } else {
+      //In case metrics is emitted based on total count, there could be a chance

Review Comment:
   Typo: missing space after //.



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java:
##########
@@ -201,20 +201,39 @@ public void initializeWriteResourceUtilizationMetrics() {
 
 
   @Override
-  public void initializeMetrics(MetricFormat metricFormat) {
+  public void initializeMetrics(final MetricFormat metricFormat,
+      final AbfsConfiguration abfsConfiguration) {

Review Comment:
   Instead of passing AbfsConfiguration here, it would be better to pass only isRetryMetricsEnabled.
   The whole object is not needed.
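   
   Roughly something like this (a sketch of the suggestion only; the parameter name is just an example):
   ```java
  @Override
  public void initializeMetrics(final MetricFormat metricFormat,
      final boolean isRetryMetricsEnabled) {
    switch (metricFormat) {
    case INTERNAL_BACKOFF_METRIC_FORMAT:
      abfsBackoffMetrics = new AbfsBackoffMetrics(isRetryMetricsEnabled);
      break;
    // ... remaining cases unchanged, passing isRetryMetricsEnabled wherever
    // abfsConfiguration.isBackoffRetryMetricsEnabled() is used above ...
    default:
      break;
    }
  }
   ```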



##########
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/ITestAbfsReadFooterMetrics.java:
##########
@@ -57,32 +54,14 @@
 import org.apache.hadoop.fs.azurebfs.utils.TracingHeaderValidator;
 import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
 
-import static org.assertj.core.api.Assumptions.assumeThat;
-
 public class ITestAbfsReadFooterMetrics extends AbstractAbfsScaleTest {
 
-  public ITestAbfsReadFooterMetrics() throws Exception {
-    checkPrerequisites();
-  }
-
-  private void checkPrerequisites(){

Review Comment:
   Have these been moved somewhere else?



##########
hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AbfsCountersImpl.java:
##########
@@ -201,20 +201,39 @@ public void initializeWriteResourceUtilizationMetrics() {
 
 
   @Override
-  public void initializeMetrics(MetricFormat metricFormat) {
+  public void initializeMetrics(final MetricFormat metricFormat,
+      final AbfsConfiguration abfsConfiguration) {
     switch (metricFormat) {
-      case INTERNAL_BACKOFF_METRIC_FORMAT:
-        abfsBackoffMetrics = new AbfsBackoffMetrics();
-        break;
-      case INTERNAL_FOOTER_METRIC_FORMAT:
-        abfsReadFooterMetrics = new AbfsReadFooterMetrics();
-        break;
-      case INTERNAL_METRIC_FORMAT:
-        abfsBackoffMetrics = new AbfsBackoffMetrics();
-        abfsReadFooterMetrics = new AbfsReadFooterMetrics();
-        break;
-      default:
-        break;
+    case INTERNAL_BACKOFF_METRIC_FORMAT:
+      abfsBackoffMetrics = new AbfsBackoffMetrics(
+          abfsConfiguration.isBackoffRetryMetricsEnabled());
+      break;
+    case INTERNAL_FOOTER_METRIC_FORMAT:
+      initializeReadFooterMetrics();
+    case INTERNAL_METRIC_FORMAT:
+      abfsBackoffMetrics = new AbfsBackoffMetrics(
+          abfsConfiguration.isBackoffRetryMetricsEnabled());
+      initializeReadFooterMetrics();
+      break;
+    default:
+      break;
+    }
+  }
+
+  /**
+   * Initialize the read footer metrics.
+   * In case the metrics are already initialized,
+   * create a new instance with the existing map.

Review Comment:
   Why create a new instance?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

