This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 7427026300 HDDS-10772. [Ozone-Streaming] Stream write metric is wrong (#6610)
7427026300 is described below

commit 7427026300fd2ca03cc6a310a4c746b711f616da
Author: hao guo <[email protected]>
AuthorDate: Thu May 9 13:20:29 2024 +0800

    HDDS-10772. [Ozone-Streaming] Stream write metric is wrong (#6610)
---
 .../ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java   | 7 +------
 .../ozone/container/keyvalue/impl/StreamDataChannelBase.java       | 3 +++
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
index 185ad9c001..7a08c7ef4e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf;
 import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled;
@@ -167,11 +166,7 @@ public class KeyValueStreamDataChannel extends StreamDataChannelBase {
     getMetrics().incContainerOpsMetrics(getType());
     assertOpen();
 
-    final long l = Time.monotonicNow();
-    int len = writeBuffers(referenceCounted, buffers, super::writeFileChannel);
-    getMetrics()
-        .incContainerOpsLatencies(getType(), Time.monotonicNow() - l);
-    return len;
+    return writeBuffers(referenceCounted, buffers, this::writeFileChannel);
   }
 
   static int writeBuffers(ReferenceCountedObject<ByteBuffer> src,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
index 810495b2a7..a88f452167 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.statemachine.StateMachine;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -129,9 +130,11 @@ abstract class StreamDataChannelBase
 
   final int writeFileChannel(ByteBuffer src) throws IOException {
     try {
+      final long startTime = Time.monotonicNow();
       final int writeBytes = getChannel().write(src);
       metrics.incContainerBytesStats(getType(), writeBytes);
       containerData.updateWriteStats(writeBytes, false);
+      metrics.incContainerOpsLatencies(getType(), Time.monotonicNow() - startTime);
       return writeBytes;
     } catch (IOException e) {
       checkVolume();


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to