Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c19bdc192 -> 22caeb1c0


HDFS-9674. The HTrace span for OpWriteBlock should record the maxWriteToDisk time. Contributed by Colin McCabe.

Change-Id: I9bf3f3bcd57f5880189ad7c160f3dd66f97d904b


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22caeb1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22caeb1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22caeb1c

Branch: refs/heads/branch-2
Commit: 22caeb1c0a8ecfb2327538ac9bbc8fd5f2fe08d5
Parents: c19bdc1
Author: Zhe Zhang <z...@apache.org>
Authored: Thu Jan 21 13:25:42 2016 -0800
Committer: Zhe Zhang <z...@apache.org>
Committed: Thu Jan 21 13:26:54 2016 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt              |  3 +++
 .../hadoop/hdfs/server/datanode/BlockReceiver.java       | 11 +++++++++++
 2 files changed, 14 insertions(+)
----------------------------------------------------------------------
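
Note: the annotation is only attached when BlockReceiver#close() runs with an
active HTrace span, i.e. when Tracer.getCurrentSpan() returns non-null on the
closing thread. Below is a minimal sketch of setting up such a scope with the
htrace-core4 API; the tracer name, sampler setting, and example class are
illustrative assumptions, not part of this patch (the real DataNode wires up
its own Tracer from its Hadoop configuration).

import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class OpWriteBlockTraceExample {
  public static void main(String[] args) {
    // Hypothetical standalone tracer; sample every span so the example
    // always produces one.
    Tracer tracer = new Tracer.Builder("OpWriteBlockExample")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "sampler.classes", "AlwaysSampler"))
        .build();

    try (TraceScope scope = tracer.newScope("opWriteBlock")) {
      // Code on this thread now sees scope's span as the current span, so a
      // BlockReceiver closed here would attach maxWriteToDiskMs to it.
    }
  }
}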


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22caeb1c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9713a55..67a46c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -47,6 +47,9 @@ Release 2.9.0 - UNRELEASED
 
     HDFS-9542. Move BlockIdManager from FSNamesystem to BlockManager. (jing9)
 
+    HDFS-9674. The HTrace span for OpWriteBlock should record the maxWriteToDisk
+    time. (cmccabe via zhz)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22caeb1c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 8003c76..e7908a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -58,6 +58,8 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;
 
 import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
 import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.SYNC_FILE_RANGE_WRITE;
@@ -136,6 +138,7 @@ class BlockReceiver implements Closeable {
   private long lastResponseTime = 0;
   private boolean isReplaceBlock = false;
   private DataOutputStream replyOut = null;
+  private long maxWriteToDiskMs = 0;
   
   private boolean pinning;
   private long lastSentTime;
@@ -302,6 +305,11 @@ class BlockReceiver implements Closeable {
    */
   @Override
   public void close() throws IOException {
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
+      span.addKVAnnotation("maxWriteToDiskMs",
+            Long.toString(maxWriteToDiskMs));
+    }
     packetReceiver.close();
 
     IOException ioe = null;
@@ -697,6 +705,9 @@ class BlockReceiver implements Closeable {
           long begin = Time.monotonicNow();
           out.write(dataBuf.array(), startByteToDisk, numBytesToDisk);
           long duration = Time.monotonicNow() - begin;
+          if (duration > maxWriteToDiskMs) {
+            maxWriteToDiskMs = duration;
+          }
           if (duration > datanodeSlowLogThresholdMs) {
             LOG.warn("Slow BlockReceiver write data to disk cost:" + duration
                 + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
