This is an automated email from the ASF dual-hosted git repository.

zghao pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit e50d1f5a5369da9a234851821c6b209d4fe98971
Author: Guanghao Zhang <zg...@apache.org>
AuthorDate: Wed Sep 18 21:44:05 2019 +0800

    HBASE-23037 Make the split WAL related log more readable (#632)
    
    Signed-off-by: Duo Zhang <zhang...@apache.org>
---
 .../apache/hadoop/hbase/master/SplitLogManager.java | 11 +++++++----
 .../wal/BoundedLogWriterCreationOutputSink.java     |  5 ++---
 .../org/apache/hadoop/hbase/wal/WALSplitter.java    | 21 +++++++++++++++------
 3 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 4d977d3..e99bc16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLog
 import org.apache.hadoop.hbase.log.HBaseMarkers;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -289,10 +290,12 @@ public class SplitLogManager {
       }
       SplitLogCounters.tot_mgr_log_split_batch_success.increment();
     }
-    String msg = "Finished splitting (more than or equal to) " + totalSize +
-        " bytes in " + ((batch == null)? 0: batch.installed) +
-        " log files in " + logDirs + " in " +
-        ((startTime == 0)? startTime: (EnvironmentEdgeManager.currentTime() - startTime)) + "ms";
+    String msg =
+        "Finished splitting (more than or equal to) " + StringUtils.humanSize(totalSize) + " (" +
+            totalSize + " bytes) in " + ((batch == null) ? 0 : batch.installed) + " log files in " +
+            logDirs + " in " +
+            ((startTime == 0) ? startTime : (EnvironmentEdgeManager.currentTime() - startTime)) +
+            "ms";
     status.markComplete(msg);
     LOG.info(msg);
     return totalSize;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedLogWriterCreationOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedLogWriterCreationOutputSink.java
index 5fa7bef..77b8f93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedLogWriterCreationOutputSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedLogWriterCreationOutputSink.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.wal;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -73,7 +72,7 @@ public class BoundedLogWriterCreationOutputSink extends LogRecoveredEditsOutputS
     for (final Map.Entry<byte[], WALSplitter.RegionEntryBuffer> buffer : entryBuffers.buffers
         .entrySet()) {
       LOG.info("Submitting writeThenClose of {}",
-        Arrays.toString(buffer.getValue().encodedRegionName));
+          Bytes.toString(buffer.getValue().encodedRegionName));
       completionService.submit(new Callable<Void>() {
         @Override
         public Void call() throws Exception {
@@ -148,4 +147,4 @@ public class BoundedLogWriterCreationOutputSink extends LogRecoveredEditsOutputS
     }
     return dst;
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index e3aa756..4820f20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -44,11 +44,13 @@ import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.master.SplitLogManager;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.regionserver.LastSequenceId;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WAL.Reader;
@@ -220,9 +222,11 @@ public class WALSplitter {
           "Splitting log file " + logfile.getPath() + "into a temporary 
staging area.");
     Reader logFileReader = null;
     this.fileBeingSplit = logfile;
+    long startTS = EnvironmentEdgeManager.currentTime();
     try {
       long logLength = logfile.getLen();
-      LOG.info("Splitting WAL={}, length={}", logPath, logLength);
+      LOG.info("Splitting WAL={}, size={} ({} bytes)", logPath, 
StringUtils.humanSize(logLength),
+          logLength);
       status.setStatus("Opening log file");
       if (reporter != null && !reporter.progress()) {
         progress_failed = true;
@@ -233,6 +237,8 @@ public class WALSplitter {
         LOG.warn("Nothing to split in WAL={}", logPath);
         return true;
       }
+      long openCost = EnvironmentEdgeManager.currentTime() - startTS;
+      LOG.info("Open WAL={} cost {} ms", logPath, openCost);
       int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
       int numOpenedFilesLastCheck = 0;
       outputSink.setReporter(reporter);
@@ -240,6 +246,7 @@ public class WALSplitter {
       outputSinkStarted = true;
       Entry entry;
       Long lastFlushedSequenceId = -1L;
+      startTS = EnvironmentEdgeManager.currentTime();
       while ((entry = getNextLogLine(logFileReader, logPath, skipErrors)) != null) {
         byte[] region = entry.getKey().getEncodedRegionName();
         String encodedRegionNameAsStr = Bytes.toString(region);
@@ -323,11 +330,13 @@ public class WALSplitter {
           progress_failed = outputSink.finishWritingAndClose() == null;
         }
       } finally {
-        String msg =
-            "Processed " + editsCount + " edits across " + 
outputSink.getNumberOfRecoveredRegions()
-                + " regions; edits skipped=" + editsSkipped + "; log file=" + 
logPath +
-                ", length=" + logfile.getLen() + // See if length got updated 
post lease recovery
-                ", corrupted=" + isCorrupted + ", progress failed=" + 
progress_failed;
+        long processCost = EnvironmentEdgeManager.currentTime() - startTS;
+        // See if length got updated post lease recovery
+        String msg = "Processed " + editsCount + " edits across " +
+            outputSink.getNumberOfRecoveredRegions() + " regions cost " + processCost +
+            " ms; edits skipped=" + editsSkipped + "; WAL=" + logPath + ", size=" +
+            StringUtils.humanSize(logfile.getLen()) + ", length=" + logfile.getLen() +
+            ", corrupted=" + isCorrupted + ", progress failed=" + progress_failed;
         LOG.info(msg);
         status.markComplete(msg);
       }
