Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.1 91bb336d2 -> a7de3cfa7


HDFS-11915. Sync rbw dir on the first hsync() to avoid file loss on power failure. Contributed by Vinayakumar B.

(cherry picked from commit 2273499aef18ac2c7ffc435a61db8cea591e8b1f)
(cherry picked from commit f24d3b69b403f3a2c5af6b9c74a643fb9f4492e5)
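
For context: the failure mode addressed here is that a block file newly
created under the rbw directory can vanish after a power failure if only the
file's data is fsynced, because the directory entry itself is never flushed
to disk. The sketch below is a minimal, standalone illustration of that idea
in plain java.nio, not the DataNode code path; opening the parent directory
with a read-only FileChannel and calling force(true) is a common POSIX/Linux
technique (not guaranteed on every platform), and the class, method and demo
path names are hypothetical.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    public class DurableCreateDemo {
      /** Create a file and make both its contents and its directory entry durable. */
      static void writeDurably(Path file, byte[] data) throws IOException {
        Files.createDirectories(file.getParent());
        // 1. Write and fsync the file contents.
        try (FileChannel ch = FileChannel.open(file,
            StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
          ch.write(ByteBuffer.wrap(data));
          ch.force(true);
        }
        // 2. fsync the parent directory so the new directory entry also
        //    survives a power failure (POSIX behaviour).
        try (FileChannel dir = FileChannel.open(file.getParent(),
            StandardOpenOption.READ)) {
          dir.force(true);
        }
      }

      public static void main(String[] args) throws IOException {
        writeDurably(Paths.get("/tmp/rbw-demo/blk_0001"),
            "payload".getBytes(StandardCharsets.UTF_8));
      }
    }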


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b42f02ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b42f02ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b42f02ca

Branch: refs/heads/branch-2.9.1
Commit: b42f02ca0c011f5998a12dbbc22e26888874a22d
Parents: 91bb336
Author: Wei-Chiu Chuang <weic...@apache.org>
Authored: Fri Jan 12 10:00:00 2018 -0800
Committer: Sammi Chen <sammi.c...@intel.com>
Committed: Tue Apr 10 11:41:48 2018 +0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockReceiver.java       |  9 +++++++++
 .../hadoop/hdfs/server/datanode/DatanodeUtil.java | 18 ++++++++++++++++++
 .../datanode/fsdataset/impl/FsDatasetImpl.java    | 15 ++-------------
 3 files changed, 29 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b42f02ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index c8a33ca..7f381b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -24,6 +24,7 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
@@ -127,6 +128,7 @@ class BlockReceiver implements Closeable {
 
   private boolean syncOnClose;
   private volatile boolean dirSyncOnFinalize;
+  private boolean dirSyncOnHSyncDone = false;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
   private ReplicaHandler replicaHandler;
@@ -421,6 +423,13 @@ class BlockReceiver implements Closeable {
       }
       flushTotalNanos += flushEndNanos - flushStartNanos;
     }
+    if (isSync && !dirSyncOnHSyncDone && replicaInfo instanceof ReplicaInfo) {
+      ReplicaInfo rInfo = (ReplicaInfo) replicaInfo;
+      File baseDir = rInfo.getBlockFile().getParentFile();
+      FileIoProvider fileIoProvider = datanode.getFileIoProvider();
+      DatanodeUtil.fsyncDirectory(fileIoProvider, rInfo.getVolume(), baseDir);
+      dirSyncOnHSyncDone = true;
+    }
     if (checksumOut != null || streams.getDataOut() != null) {
       datanode.metrics.addFlushNanos(flushTotalNanos);
       if (isSync) {
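
To make the new guard in flushOrSync() easier to follow, here is a small
standalone sketch that isolates the "sync the parent directory only on the
first hsync" pattern. RbwDirSyncGuard and its DirSyncer callback are
hypothetical stand-ins for BlockReceiver and FileIoProvider#dirSync; only
the flag-based once-per-replica behaviour mirrors the change above.

    import java.io.File;
    import java.io.IOException;

    class RbwDirSyncGuard {
      /** Hypothetical stand-in for FileIoProvider#dirSync. */
      interface DirSyncer {
        void sync(File dir) throws IOException;
      }

      private final DirSyncer syncer;
      private boolean dirSyncOnHSyncDone = false;  // same flag as the patch

      RbwDirSyncGuard(DirSyncer syncer) {
        this.syncer = syncer;
      }

      /** Called on every flush; syncs the rbw dir only on the first hsync. */
      void onFlush(boolean isSync, File blockFile) throws IOException {
        if (isSync && !dirSyncOnHSyncDone) {
          // Make the block file's directory entry durable exactly once.
          syncer.sync(blockFile.getParentFile());
          dirSyncOnHSyncDone = true;
        }
      }
    }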

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b42f02ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
index c98ff54..e29a5ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
@@ -142,4 +142,22 @@ public class DatanodeUtil {
     }
     return (FileInputStream)lin.getWrappedStream();
   }
+
+  /**
+   * Call fsync on the specified directories to sync directory metadata changes.
+   * @param fileIoProvider the FileIoProvider used to issue the directory sync
+   * @param volume the volume that contains the directories
+   * @param dirs the directories whose metadata changes should be synced
+   * @throws IOException if syncing any of the directories fails
+   */
+  public static void fsyncDirectory(FileIoProvider fileIoProvider,
+      FsVolumeSpi volume, File... dirs) throws IOException {
+    for (File dir : dirs) {
+      try {
+        fileIoProvider.dirSync(volume, dir);
+      } catch (IOException e) {
+        throw new IOException("Failed to sync " + dir, e);
+      }
+    }
+  }
 }
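
The new helper is also easy to emulate outside the DataNode. The snippet
below is a simplified, dependency-free analogue of DatanodeUtil#fsyncDirectory
that keeps the same varargs shape and per-directory error wrapping but goes
through FileChannel directly instead of FileIoProvider/FsVolumeSpi; treat it
as a sketch of the behaviour, not the HDFS implementation. A caller would
pass, e.g., both the finalized and rbw parent directories after a block move,
mirroring the FsDatasetImpl call site in the next hunk.

    import java.io.File;
    import java.io.IOException;
    import java.nio.channels.FileChannel;
    import java.nio.file.StandardOpenOption;

    final class DirSync {
      /** Sync each directory's metadata, wrapping failures with the offending path. */
      static void fsyncDirectories(File... dirs) throws IOException {
        for (File dir : dirs) {
          try (FileChannel ch = FileChannel.open(dir.toPath(),
              StandardOpenOption.READ)) {
            ch.force(true);  // flush directory entries to stable storage (POSIX)
          } catch (IOException e) {
            throw new IOException("Failed to sync " + dir, e);
          }
        }
      }
    }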

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b42f02ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2512f6d..4486b73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -929,18 +929,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     return dstfile;
   }
 
-  private void fsyncDirectory(FsVolumeSpi volume, File... dirs)
-      throws IOException {
-    FileIoProvider fileIoProvider = datanode.getFileIoProvider();
-    for (File dir : dirs) {
-      try {
-        fileIoProvider.dirSync(volume, dir);
-      } catch (IOException e) {
-        throw new IOException("Failed to sync " + dir, e);
-      }
-    }
-  }
-
   /**
    * Copy the block and meta files for the given block to the given destination.
    * @return the new meta and block files.
@@ -1801,7 +1789,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       FsVolumeSpi v = replicaInfo.getVolume();
       File f = replicaInfo.getBlockFile();
       File dest = finalizedReplicaInfo.getBlockFile();
-      fsyncDirectory(v, dest.getParentFile(), f.getParentFile());
+      DatanodeUtil.fsyncDirectory(datanode.getFileIoProvider(), v,
+          dest.getParentFile(), f.getParentFile());
     }
   }
 

