Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 2b58fb8b0 -> 42c2d36ba


HDFS-7990. IBR delete ack should not be delayed. Contributed by Daryn Sharp. Backport HDFS-11838 by Vinitha Gankidi.

(cherry picked from commit 60882ab26d49f05cbf0686944af6559f86b3417d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c3f6aed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c3f6aed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c3f6aed

Branch: refs/heads/branch-2.7
Commit: 2c3f6aedf11ef0729a306faab26e22854de4248d
Parents: 2b58fb8
Author: Konstantin V Shvachko <s...@apache.org>
Authored: Wed May 17 23:58:27 2017 -0700
Committer: Konstantin V Shvachko <s...@apache.org>
Committed: Wed May 17 23:58:27 2017 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt            |  3 +++
 .../hadoop/hdfs/server/datanode/BPServiceActor.java    | 13 +++----------
 .../org/apache/hadoop/hdfs/server/datanode/DNConf.java |  2 --
 .../hdfs/server/datanode/SimulatedFSDataset.java       | 13 ++++++++++++-
 .../server/datanode/TestIncrementalBlockReports.java   |  4 ++--
 5 files changed, 20 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c3f6aed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 19b2996..32a5069 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -88,6 +88,9 @@ Release 2.7.4 - UNRELEASED
     HDFS-11785. Backport HDFS-9902 to branch-2.7: Support different values of
     dfs.datanode.du.reserved per storage type. (Brahma Reddy Battula)
 
+    HDFS-7990. IBR delete ack should not be delayed. (Daryn Sharp.
+    Backport HDFS-11838 by Vinitha Gankidi)
+
   OPTIMIZATIONS
 
     HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c3f6aed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 05aca6c..38b7e94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -82,8 +82,6 @@ class BPServiceActor implements Runnable {
   HAServiceState state;
 
   final BPOfferService bpos;
-  
-  volatile long lastDeletedReport = 0;
 
   volatile long lastCacheReport = 0;
   private final Scheduler scheduler;
@@ -385,10 +383,10 @@ class BPServiceActor implements Runnable {
   @VisibleForTesting
   void triggerDeletionReportForTests() {
     synchronized (pendingIncrementalBRperStorage) {
-      lastDeletedReport = 0;
+      sendImmediateIBR = true;
       pendingIncrementalBRperStorage.notifyAll();
 
-      while (lastDeletedReport == 0) {
+      while (sendImmediateIBR) {
         try {
           pendingIncrementalBRperStorage.wait(100);
         } catch (InterruptedException e) {
@@ -421,7 +419,6 @@ class BPServiceActor implements Runnable {
    */
   List<DatanodeCommand> blockReport() throws IOException {
     // send block report if timer has expired.
-    final long startTime = scheduler.monotonicNow(); 
     if (!scheduler.isBlockReportDue()) {
       return null;
     }
@@ -433,7 +430,6 @@ class BPServiceActor implements Runnable {
     // or we will report an RBW replica after the BlockReport already reports
     // a FINALIZED one.
     reportReceivedDeletedBlocks();
-    lastDeletedReport = startTime;
 
     long brCreateStartTime = monotonicNow();
     Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
@@ -624,7 +620,6 @@ class BPServiceActor implements Runnable {
    */
   private void offerService() throws Exception {
     LOG.info("For namenode " + nnAddr + " using"
-        + " DELETEREPORT_INTERVAL of " + dnConf.deleteReportInterval + " msec "
         + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
         + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
         + " Initial delay: " + dnConf.initialBlockReportDelay + "msec"
@@ -679,10 +674,8 @@ class BPServiceActor implements Runnable {
             }
           }
         }
-        if (sendImmediateIBR ||
-            (startTime - lastDeletedReport > dnConf.deleteReportInterval)) {
+        if (sendImmediateIBR || sendHeartbeat) {
           reportReceivedDeletedBlocks();
-          lastDeletedReport = startTime;
         }
 
         List<DatanodeCommand> cmds = blockReport();
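
For reference, a condensed before/after sketch of the send logic changed in
this file (names are taken from the hunks above; surrounding loop code is
elided):

    // Before: a delete ack could sit in pendingIncrementalBRperStorage until
    // the deletion-report timer expired.
    if (sendImmediateIBR ||
        (startTime - lastDeletedReport > dnConf.deleteReportInterval)) {
      reportReceivedDeletedBlocks();
      lastDeletedReport = startTime;
    }

    // After: the IBR goes out as soon as sendImmediateIBR is set and otherwise
    // piggybacks on every heartbeat, so the delete ack is no longer delayed.
    if (sendImmediateIBR || sendHeartbeat) {
      reportReceivedDeletedBlocks();
    }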

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c3f6aed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index e59f23f..675e6f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -84,7 +84,6 @@ public class DNConf {
   final long heartBeatInterval;
   final long blockReportInterval;
   final long blockReportSplitThreshold;
-  final long deleteReportInterval;
   final long initialBlockReportDelay;
   final long cacheReportInterval;
   final long dfsclientSlowIoWarningThresholdMs;
@@ -168,7 +167,6 @@ public class DNConf {
     heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
     
-    this.deleteReportInterval = 100 * heartBeatInterval;
     // do we need to sync block file contents to disk when blockfile is closed?
     this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, 
         DFS_DATANODE_SYNCONCLOSE_DEFAULT);
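
To put the removed constant in perspective: deleteReportInterval was hardcoded
to 100 * heartBeatInterval, so with the default dfs.heartbeat.interval of 3
seconds a deletion could go unreported for up to 100 * 3000 ms = 300 seconds.
After this patch the worst case is a single heartbeat interval, since every
heartbeat now flushes pending IBRs.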

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c3f6aed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 63b6bb4..86f0d42 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -84,7 +84,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     @Override
     public SimulatedFSDataset newInstance(DataNode datanode,
         DataStorage storage, Configuration conf) throws IOException {
-      return new SimulatedFSDataset(storage, conf);
+      return new SimulatedFSDataset(datanode, storage, conf);
     }
 
     @Override
@@ -519,8 +519,15 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   private final SimulatedStorage storage;
   private final SimulatedVolume volume;
   private final String datanodeUuid;
+  private final DataNode datanode;
   
+
   public SimulatedFSDataset(DataStorage storage, Configuration conf) {
+    this(null, storage, conf);
+  }
+
+  public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration conf) {
+    this.datanode = datanode;
     if (storage != null) {
       for (int i = 0; i < storage.getNumStorageDirs(); ++i) {
         DataStorage.createStorageID(storage.getStorageDir(i), false);
@@ -747,6 +754,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       }
       storage.free(bpid, binfo.getNumBytes());
       map.remove(b);
+      if (datanode != null) {
+        datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, b),
+            binfo.getStorageUuid());
+      }
     }
     if (error) {
       throw new IOException("Invalidate: Missing blocks.");
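
This SimulatedFSDataset change is what makes the new test behavior observable:
invalidate() now queues a delete IBR for each removed block. Condensed from the
hunk above (the null check preserves the legacy two-argument constructor, which
passes datanode = null):

    // Inside invalidate(bpid, invalidBlks), for each deleted block b:
    storage.free(bpid, binfo.getNumBytes());
    map.remove(b);
    if (datanode != null) {
      // Tell the DataNode to report the deletion in its next IBR.
      datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, b),
          binfo.getStorageUuid());
    }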

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c3f6aed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
index b5aa93f..cd2c125 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestIncrementalBlockReports.java
@@ -159,8 +159,8 @@ public class TestIncrementalBlockReports {
           anyString(),
           any(StorageReceivedDeletedBlocks[].class));
 
-      // Trigger a block report, this also triggers an IBR.
-      DataNodeTestUtils.triggerBlockReport(singletonDn);
+      // Trigger a heartbeat, this also triggers an IBR.
+      DataNodeTestUtils.triggerHeartbeat(singletonDn);
       Thread.sleep(2000);
 
       // Ensure that the deleted block is reported.
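
Since delete acks now ride on heartbeats, the test flushes the pending IBR with
triggerHeartbeat() instead of triggerBlockReport(). A sketch of the surrounding
Mockito verification, assuming a NameNode spy named nnSpy (the spy name and
exact invocation count are assumptions; the argument matchers follow the
context lines above):

    DataNodeTestUtils.triggerHeartbeat(singletonDn);
    Thread.sleep(2000);

    // Ensure the deleted block was reported to the NameNode.
    Mockito.verify(nnSpy, times(1)).blockReceivedAndDeleted(
        any(DatanodeRegistration.class),
        anyString(),
        any(StorageReceivedDeletedBlocks[].class));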

