This is an automated email from the ASF dual-hosted git repository.

siyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 6ba64bf  HDDS-4359. Expose VolumeIOStats in DN JMX (#1506)
6ba64bf is described below

commit 6ba64bf3dce5e759ee21061b433c387be6b4d911
Author: Siyao Meng <50227127+smen...@users.noreply.github.com>
AuthorDate: Wed Oct 21 10:23:32 2020 -0700

    HDDS-4359. Expose VolumeIOStats in DN JMX (#1506)
---
 .../ozone/container/common/volume/HddsVolume.java  |  8 ++-
 .../container/common/volume/VolumeIOStats.java     | 72 ++++++++++++++--------
 .../container/metrics/TestContainerMetrics.java    | 14 +++++
 3 files changed, 68 insertions(+), 26 deletions(-)

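For context, the change below replaces VolumeIOStats' plain AtomicLong counters with Hadoop metrics2 @Metric counters and registers one metrics source per HDDS volume, so the values become visible in the datanode's JMX. The standalone sketch that follows only illustrates that metrics2 registration pattern; the class name ExampleIOStats and its description string are illustrative and not part of this commit.

    // Minimal sketch of the metrics2 pattern adopted below; names here are illustrative only.
    import org.apache.hadoop.metrics2.MetricsSystem;
    import org.apache.hadoop.metrics2.annotation.Metric;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    import org.apache.hadoop.metrics2.lib.MutableCounterLong;

    public class ExampleIOStats {
      // Each @Metric field is materialized by metrics2 and exported as a counter.
      @Metric private MutableCounterLong readBytes;

      private final String sourceName;

      public ExampleIOStats(String identifier) {
        // One source per volume: suffix the class name with the volume root path.
        this.sourceName = ExampleIOStats.class.getSimpleName() + '-' + identifier;
        MetricsSystem ms = DefaultMetricsSystem.instance();
        ms.register(sourceName, "Example I/O statistics", this);
      }

      public void incReadBytes(long bytes) {
        readBytes.incr(bytes);
      }

      public void unregister() {
        // Called on shutdown so stale per-volume sources do not linger in JMX.
        DefaultMetricsSystem.instance().unregisterSource(sourceName);
      }
    }
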
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index c3a5a41..66cd657 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -165,7 +165,7 @@ public class HddsVolume
       this.state = VolumeState.NOT_INITIALIZED;
       this.clusterID = b.clusterID;
       this.datanodeUuid = b.datanodeUuid;
-      this.volumeIOStats = new VolumeIOStats();
+      this.volumeIOStats = new VolumeIOStats(b.volumeRootStr);
 
       volumeInfo = new VolumeInfo.Builder(b.volumeRootStr, b.conf)
           .storageType(b.storageType)
@@ -400,6 +400,9 @@ public class HddsVolume
     if (volumeInfo != null) {
       volumeInfo.shutdownUsageThread();
     }
+    if (volumeIOStats != null) {
+      volumeIOStats.unregister();
+    }
   }
 
   public void shutdown() {
@@ -407,6 +410,9 @@ public class HddsVolume
     if (volumeInfo != null) {
       volumeInfo.shutdownUsageThread();
     }
+    if (volumeIOStats != null) {
+      volumeIOStats.unregister();
+    }
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
index 9e2eb22..c5533cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java
@@ -18,27 +18,49 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 
 /**
  * This class is used to track Volume IO stats for each HDDS Volume.
  */
 public class VolumeIOStats {
+  private String metricsSourceName = VolumeIOStats.class.getSimpleName();
 
-  private final AtomicLong readBytes;
-  private final AtomicLong readOpCount;
-  private final AtomicLong writeBytes;
-  private final AtomicLong writeOpCount;
-  private final AtomicLong readTime;
-  private final AtomicLong writeTime;
+  private @Metric MutableCounterLong readBytes;
+  private @Metric MutableCounterLong readOpCount;
+  private @Metric MutableCounterLong writeBytes;
+  private @Metric MutableCounterLong writeOpCount;
+  private @Metric MutableCounterLong readTime;
+  private @Metric MutableCounterLong writeTime;
 
+  @Deprecated
   public VolumeIOStats() {
-    readBytes = new AtomicLong(0);
-    readOpCount = new AtomicLong(0);
-    writeBytes = new AtomicLong(0);
-    writeOpCount = new AtomicLong(0);
-    readTime = new AtomicLong(0);
-    writeTime = new AtomicLong(0);
+    init();
+  }
+
+  /**
+   * @param identifier Typically, path to volume root. e.g. /data/hdds
+   */
+  public VolumeIOStats(String identifier) {
+    this.metricsSourceName += '-' + identifier;
+    init();
+  }
+
+  public void init() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.register(metricsSourceName, "Volume I/O Statistics", this);
+  }
+
+  public void unregister() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.unregisterSource(metricsSourceName);
+  }
+
+  public String getMetricsSourceName() {
+    return metricsSourceName;
   }
 
   /**
@@ -46,14 +68,14 @@ public class VolumeIOStats {
    * @param bytesRead
    */
   public void incReadBytes(long bytesRead) {
-    readBytes.addAndGet(bytesRead);
+    readBytes.incr(bytesRead);
   }
 
   /**
    * Increment the read operations performed on the volume.
    */
   public void incReadOpCount() {
-    readOpCount.incrementAndGet();
+    readOpCount.incr();
   }
 
   /**
@@ -61,14 +83,14 @@ public class VolumeIOStats {
    * @param bytesWritten
    */
   public void incWriteBytes(long bytesWritten) {
-    writeBytes.addAndGet(bytesWritten);
+    writeBytes.incr(bytesWritten);
   }
 
   /**
    * Increment the write operations performed on the volume.
    */
   public void incWriteOpCount() {
-    writeOpCount.incrementAndGet();
+    writeOpCount.incr();
   }
 
   /**
@@ -76,7 +98,7 @@ public class VolumeIOStats {
    * @param time
    */
   public void incReadTime(long time) {
-    readTime.addAndGet(time);
+    readTime.incr(time);
   }
 
   /**
@@ -84,7 +106,7 @@ public class VolumeIOStats {
    * @param time
    */
   public void incWriteTime(long time) {
-    writeTime.addAndGet(time);
+    writeTime.incr(time);
   }
 
   /**
@@ -92,7 +114,7 @@ public class VolumeIOStats {
    * @return long
    */
   public long getReadBytes() {
-    return readBytes.get();
+    return readBytes.value();
   }
 
   /**
@@ -100,7 +122,7 @@ public class VolumeIOStats {
    * @return long
    */
   public long getWriteBytes() {
-    return writeBytes.get();
+    return writeBytes.value();
   }
 
   /**
@@ -108,7 +130,7 @@ public class VolumeIOStats {
    * @return long
    */
   public long getReadOpCount() {
-    return readOpCount.get();
+    return readOpCount.value();
   }
 
   /**
@@ -116,7 +138,7 @@ public class VolumeIOStats {
    * @return long
    */
   public long getWriteOpCount() {
-    return writeOpCount.get();
+    return writeOpCount.value();
   }
 
   /**
@@ -124,7 +146,7 @@ public class VolumeIOStats {
    * @return long
    */
   public long getReadTime() {
-    return readTime.get();
+    return readTime.value();
   }
 
   /**
@@ -132,7 +154,7 @@ public class VolumeIOStats {
    * @return long
    */
   public long getWriteTime() {
-    return writeTime.get();
+    return writeTime.value();
   }
 
 
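A hypothetical caller of the new API (not code from this patch) would time an I/O operation against the volume and bump the counters through HddsVolume's VolumeIOStats. The helper below is only a usage sketch; the latency unit passed to incReadTime is whatever the caller uses consistently.

    // Hypothetical usage sketch, not part of this commit: feeding the per-volume
    // counters from a read path. Assumes the caller already resolved the HddsVolume.
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
    import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats;

    public final class VolumeIOStatsUsage {
      private VolumeIOStatsUsage() { }

      public static void recordRead(HddsVolume volume, long bytesRead,
          long elapsedTime) {
        VolumeIOStats stats = volume.getVolumeIOStats();
        stats.incReadBytes(bytesRead);  // cumulative bytes read from this volume
        stats.incReadOpCount();         // one more read operation
        stats.incReadTime(elapsedTime); // cumulative read latency (caller-defined unit)
      }
    }
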
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index 6baff8d..ddd467b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.metrics;
 import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 
 import com.google.common.collect.Maps;
@@ -48,6 +49,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
@@ -171,6 +173,18 @@ public class TestContainerMetrics {
       String sec = interval + "s";
       Thread.sleep((interval + 1) * 1000);
       assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics);
+
+      // Check VolumeIOStats metrics
+      HddsVolume hddsVolume = volumeSet.getVolumesList().get(0);
+      MetricsRecordBuilder volumeIOMetrics =
+          getMetrics(hddsVolume.getVolumeIOStats().getMetricsSourceName());
+      assertCounter("ReadBytes", 1024L, volumeIOMetrics);
+      assertCounter("ReadOpCount", 1L, volumeIOMetrics);
+      assertCounter("WriteBytes", 1024L, volumeIOMetrics);
+      assertCounter("WriteOpCount", 1L, volumeIOMetrics);
+      // ReadTime and WriteTime vary from run to run, only checking non-zero
+      Assert.assertNotEquals(0L, getLongCounter("ReadTime", volumeIOMetrics));
+      Assert.assertNotEquals(0L, getLongCounter("WriteTime", volumeIOMetrics));
     } finally {
       if (client != null) {
         client.close();


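Once a datanode with this change is running and its metrics system is initialized, each volume should surface as a separate JMX MBean. One hedged way to confirm this from inside the same JVM is to query the platform MBean server, as sketched below; the ObjectName pattern assumes Hadoop's usual "Hadoop:service=<prefix>,name=<sourceName>" naming for metrics2 sources, and the attribute names mirror the counters asserted in the test above.

    // Sketch only: list the per-volume VolumeIOStats MBeans from within the
    // datanode JVM. Assumes the metrics system has been initialized so that
    // metrics2 sources are published to JMX under the "Hadoop" domain.
    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class ListVolumeIOStatsBeans {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName pattern = new ObjectName("Hadoop:name=VolumeIOStats-*,*");
        for (ObjectName name : mbs.queryNames(pattern, null)) {
          System.out.println(name + " ReadBytes="
              + mbs.getAttribute(name, "ReadBytes"));
        }
      }
    }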