Repository: hadoop
Updated Branches:
  refs/heads/HDFS-11634-2.7 [created] c2dba1a40
  refs/heads/b-2.7-HDFS-12371 [created] 7d6bc56f1
  refs/heads/branch-2 c138682e0 -> de419e277
  refs/heads/branch-2-HDFS-10301 [created] 06d3eb751
  refs/heads/branch-2.8 fb15e4173 -> 1da69ae2f
  refs/heads/branch-2.9 5cbbc4c37 -> b0d602112
  refs/heads/branch-3.0 7483acf48 -> 4ba5efa96
  refs/heads/branch-3.1 e96c7bf82 -> 4ce45a8ec
  refs/heads/rel/release-2.7.3-branch [created] baa91f7c6
  refs/heads/revert [created] 2b05559b2
  refs/heads/truncate [created] 453a17541
  refs/heads/trunk 659074728 -> acfd764fc


http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
index c512038..ced3296 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -53,8 +54,8 @@ public class BlockRecoveryCommand extends DatanodeCommand {
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
   public static class RecoveringBlock extends LocatedBlock {
-    private boolean truncate;
     private final long newGenerationStamp;
+    private final Block recoveryBlock;
 
     /**
      * Create RecoveringBlock.
@@ -62,15 +63,17 @@ public class BlockRecoveryCommand extends DatanodeCommand {
     public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
       super(b, locs, -1, false); // startOffset is unknown
       this.newGenerationStamp = newGS;
+      this.recoveryBlock = null;
     }
 
     /**
-     * RecoveryingBlock with truncate option.
+     * Create RecoveringBlock with copy-on-truncate option.
      */
-    public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS,
-                           boolean truncate) {
-      this(b, locs, newGS);
-      this.truncate = truncate;
+    public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs,
+        Block recoveryBlock) {
+      super(b, locs, -1, false); // startOffset is unknown
+      this.newGenerationStamp = recoveryBlock.getGenerationStamp();
+      this.recoveryBlock = recoveryBlock;
     }
 
     /**
@@ -82,10 +85,10 @@ public class BlockRecoveryCommand extends DatanodeCommand {
     }
 
     /**
-     * Return whether to truncate the block to the ExtendedBlock's length.
+     * Return the new block.
      */
-    public boolean getTruncateFlag() {
-      return truncate;
+    public Block getNewBlock() {
+      return recoveryBlock;
     }
   }
 

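Illustration (not part of this patch): a minimal sketch, with a hypothetical caller, showing how the two RecoveringBlock constructors above differ. Plain recovery keeps the block id and only bumps the generation stamp; copy-on-truncate passes a recovery Block carrying the new id, length and genstamp, which getNewBlock() later returns on the datanode.

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

    class RecoveringBlockSketch {
      // Plain recovery: same block id, new generation stamp; getNewBlock() is null.
      static RecoveringBlock plainRecovery(ExtendedBlock b, DatanodeInfo[] locs, long newGS) {
        return new RecoveringBlock(b, locs, newGS);
      }

      // Copy-on-truncate: the recovery block holds the new id/length/genstamp.
      static RecoveringBlock copyOnTruncate(ExtendedBlock b, DatanodeInfo[] locs, Block recoveryBlock) {
        return new RecoveringBlock(b, locs, recoveryBlock);
      }
    }
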
http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
index 62915b4..72cb0c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
@@ -67,5 +67,6 @@ public interface InterDatanodeProtocol {
    * Update replica with the new generation stamp and length.  
    */
   String updateReplicaUnderRecovery(ExtendedBlock oldBlock, long recoveryId,
-      long newLength) throws IOException;
+                                    long newBlockId, long newLength)
+      throws IOException;
 }

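Illustration (not part of this patch): a hedged sketch of a caller of the widened interface; the wrapper name is hypothetical. Passing the old block's id corresponds to in-place recovery, while a different id lets the datanode create a copy for truncate.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;

    class UpdateReplicaSketch {
      static String syncReplica(InterDatanodeProtocol idp, ExtendedBlock oldBlock,
          long recoveryId, long newBlockId, long newLength) throws IOException {
        // The receiving datanode can compare newBlockId with oldBlock.getBlockId()
        // to tell in-place recovery apart from copy-on-truncate.
        return idp.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength);
      }
    }
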
http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
index 47f79be..1a21777 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
@@ -59,6 +59,8 @@ message UpdateReplicaUnderRecoveryRequestProto {
   required ExtendedBlockProto block = 1; // Block identifier
   required uint64 recoveryId = 2;        // New genstamp of the replica
   required uint64 newLength = 3;         // New length of the replica
+  // New blockId for copy (truncate), default is 0.
+  optional uint64 newBlockId = 4 [default = 0];
 }
 
 /**

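Illustration (not part of this patch): a sketch of one way a receiver could honour the [default = 0] above for requests whose sender never set newBlockId. This shows the compatibility idea only, not the actual translator code, and the helper is hypothetical.

    class NewBlockIdCompatSketch {
      // An unset optional uint64 arrives as 0; fall back to the old block id
      // so such requests behave like the previous in-place update.
      static long effectiveNewBlockId(long newBlockIdFromRequest, long oldBlockId) {
        return newBlockIdFromRequest == 0 ? oldBlockId : newBlockIdFromRequest;
      }
    }
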
http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 588f6c8..643a034 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -270,6 +270,7 @@ message SnapshotDiffSection {
     optional uint64 fileSize = 2;
     optional bytes name = 3;
     optional INodeSection.INodeFile snapshotCopy = 4;
+    repeated BlockProto blocks = 5;
   }
 
   message DiffEntry {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index d989c0a..97906b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -554,9 +554,9 @@ enum ReplicaStateProto {
  * Block that needs to be recovered with at a given location
  */
 message RecoveringBlockProto {
-  required uint64 newGenStamp = 1;      // New genstamp post recovery
-  required LocatedBlockProto block = 2; // Block to be recovered
-  optional bool truncateFlag = 3;       // Block needs to be truncated
+  required uint64 newGenStamp = 1;        // New genstamp post recovery
+  required LocatedBlockProto block = 2;   // Block to be recovered
+  optional BlockProto truncateBlock = 3;  // New block for recovery (truncate)
 }
 
 /**

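Illustration (not part of this patch): a sketch of turning RecoveringBlockProto back into the Java-side RecoveringBlock, assuming the standard protobuf-generated accessors (hasTruncateBlock()/getTruncateBlock()) and a small hypothetical BlockProto converter; it is not the actual PBHelper code.

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
    import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

    class RecoveringBlockProtoSketch {
      static RecoveringBlock fromProto(HdfsProtos.RecoveringBlockProto proto,
          ExtendedBlock block, DatanodeInfo[] locs) {
        // truncateBlock present => copy-on-truncate; otherwise plain recovery.
        return proto.hasTruncateBlock()
            ? new RecoveringBlock(block, locs, toBlock(proto.getTruncateBlock()))
            : new RecoveringBlock(block, locs, proto.getNewGenStamp());
      }

      static Block toBlock(HdfsProtos.BlockProto p) {
        return new Block(p.getBlockId(), p.getNumBytes(), p.getGenStamp());
      }
    }
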
http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index e4834d6..7a4960e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1239,7 +1239,7 @@ public class TestReplicationPolicy {
     when(mbc.setLastBlock((BlockInfo) any(), (DatanodeStorageInfo[]) any()))
     .thenReturn(ucBlock);
 
-    bm.convertLastBlockToUnderConstruction(mbc);
+    bm.convertLastBlockToUnderConstruction(mbc, 0L);
 
     // Choose 1 block from UnderReplicatedBlocks. Then it should pick 1 block
     // from QUEUE_VERY_UNDER_REPLICATED.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index e03b756..78eedf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1106,6 +1106,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   @Override // FsDatasetSpi
   public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
                                         long recoveryId,
+                                        long newBlockId,
                                         long newlength) {
     // Caller does not care about the exact Storage UUID returned.
     return datanodeUuid;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 987b480..9bf5e52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -219,10 +218,10 @@ public class TestBlockRecovery {
     syncList.add(record1);
     syncList.add(record2);
     
-    when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
-        anyLong())).thenReturn("storage1");
-    when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(), 
-        anyLong())).thenReturn("storage2");
+    when(dn1.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
+        anyLong(), anyLong())).thenReturn("storage1");
+    when(dn2.updateReplicaUnderRecovery((ExtendedBlock)anyObject(), anyLong(),
+        anyLong(), anyLong())).thenReturn("storage2");
     dn.syncBlock(rBlock, syncList);
   }
   
@@ -245,8 +244,10 @@ public class TestBlockRecovery {
     InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
 
     testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
-    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+        REPLICA_LEN1);
+    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+        REPLICA_LEN1);
 
     // two finalized replicas have different length
     replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
@@ -284,8 +285,10 @@ public class TestBlockRecovery {
     InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
 
     testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
-    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+        REPLICA_LEN1);
+    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+        REPLICA_LEN1);
     
     // rbw replica has a different length from the finalized one
     replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
@@ -297,9 +300,10 @@ public class TestBlockRecovery {
     dn2 = mock(InterDatanodeProtocol.class);
 
     testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+        REPLICA_LEN1);
     verify(dn2, never()).updateReplicaUnderRecovery(
-        block, RECOVERY_ID, REPLICA_LEN1);
+        block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
   }
   
   /**
@@ -323,9 +327,10 @@ public class TestBlockRecovery {
     InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
 
     testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+        REPLICA_LEN1);
     verify(dn2, never()).updateReplicaUnderRecovery(
-        block, RECOVERY_ID, REPLICA_LEN1);
+        block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
     
     // rbw replica has a different length from the finalized one
     replica1 = new ReplicaRecoveryInfo(BLOCK_ID, 
@@ -337,9 +342,10 @@ public class TestBlockRecovery {
     dn2 = mock(InterDatanodeProtocol.class);
 
     testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID,
+        REPLICA_LEN1);
     verify(dn2, never()).updateReplicaUnderRecovery(
-        block, RECOVERY_ID, REPLICA_LEN1);
+        block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
   }
   
   /**
@@ -362,8 +368,8 @@ public class TestBlockRecovery {
 
     long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
     testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
-    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);    
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
+    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
   }
   
   /**
@@ -385,9 +391,9 @@ public class TestBlockRecovery {
     InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
 
     testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
     verify(dn2, never()).updateReplicaUnderRecovery(
-        block, RECOVERY_ID, REPLICA_LEN1);    
+        block, RECOVERY_ID, BLOCK_ID, REPLICA_LEN1);
   }
   
   /**
@@ -411,8 +417,8 @@ public class TestBlockRecovery {
     long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
     testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
     
-    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
-    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);    
+    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
+    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, BLOCK_ID, minLen);
   }  
 
  private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
@@ -513,7 +519,7 @@ public class TestBlockRecovery {
     }
     DataNode spyDN = spy(dn);
     doThrow(new IOException()).when(spyDN).updateReplicaUnderRecovery(
-        block, RECOVERY_ID, block.getNumBytes());
+        block, RECOVERY_ID, BLOCK_ID, block.getNumBytes());
     try {
       spyDN.syncBlock(rBlock, initBlockRecords(spyDN));
       fail("Sync should fail");
@@ -634,7 +640,8 @@ public class TestBlockRecovery {
           recoveryInitResult.get());
       
       dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock()
-          .getGenerationStamp() + 1, block.getBlockSize());
+          .getGenerationStamp() + 1, block.getBlock().getBlockId(),
+          block.getBlockSize());
     } finally {
       if (null != cluster) {
         cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index 65a5176..3609684 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -198,7 +198,8 @@ public class TestInterDatanodeProtocol {
       //verify updateBlock
       ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
           b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
-      idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
+      idp.updateReplicaUnderRecovery(b, recoveryId, b.getBlockId(),
+          newblock.getNumBytes());
       checkMetaInfo(newblock, datanode);
       
      // Verify correct null response trying to init recovery for a missing block
@@ -368,7 +369,8 @@ public class TestInterDatanodeProtocol {
             .getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
         try {
           //update should fail
-          fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
+          fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
+              tmp.getBlockId(), newlength);
           Assert.fail();
         } catch(IOException ioe) {
           System.out.println("GOOD: getting " + ioe);
@@ -377,7 +379,8 @@ public class TestInterDatanodeProtocol {
 
       //update
       final String storageID = fsdataset.updateReplicaUnderRecovery(
-          new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, newlength);
+          new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
+          rri.getBlockId(), newlength);
       assertTrue(storageID != null);
 
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index d0502b3..eae65cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -71,6 +71,7 @@ public class TestCommitBlockSynchronization {
     doReturn(true).when(file).isUnderConstruction();
 
     doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
+    doReturn(blockInfo).when(file).getLastBlock();
     doReturn("").when(namesystemSpy).closeFileCommitBlocks(
         any(INodeFile.class), any(BlockInfo.class));
     doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
@@ -105,6 +106,7 @@ public class TestCommitBlockSynchronization {
     completedBlockInfo.setGenerationStamp(genStamp);
     doReturn(completedBlockInfo).when(namesystemSpy)
         .getStoredBlock(any(Block.class));
+    doReturn(completedBlockInfo).when(file).getLastBlock();
 
     // Repeat the call to make sure it does not throw
     namesystemSpy.commitBlockSynchronization(
@@ -176,6 +178,7 @@ public class TestCommitBlockSynchronization {
     completedBlockInfo.setGenerationStamp(genStamp);
     doReturn(completedBlockInfo).when(namesystemSpy)
         .getStoredBlock(any(Block.class));
+    doReturn(completedBlockInfo).when(file).getLastBlock();
 
     namesystemSpy.commitBlockSynchronization(
         lastBlock, genStamp, length, true, false, newTargets, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/453a1754/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index ba9d04e..1f854d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -18,14 +18,22 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.net.InetAddress;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -39,14 +47,14 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -57,6 +65,7 @@ public class TestFileTruncate {
     GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
     GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL);
   }
+  static final Log LOG = LogFactory.getLog(TestFileTruncate.class);
   static final int BLOCK_SIZE = 4;
   static final short REPLICATION = 3;
   static final int DATANODE_NUM = 3;
@@ -129,6 +138,287 @@ public class TestFileTruncate {
     fs.delete(parent, true);
   }
 
+  @Test
+  public void testSnapshotWithAppendTruncate() throws IOException {
+    testSnapshotWithAppendTruncate(0, 1, 2);
+    testSnapshotWithAppendTruncate(0, 2, 1);
+    testSnapshotWithAppendTruncate(1, 0, 2);
+    testSnapshotWithAppendTruncate(1, 2, 0);
+    testSnapshotWithAppendTruncate(2, 0, 1);
+    testSnapshotWithAppendTruncate(2, 1, 0);
+  }
+
+  /**
+   * Create three snapshots with appended and truncated file.
+   * Delete snapshots in the specified order and verify that
+   * remaining snapshots are still readable.
+   */
+  void testSnapshotWithAppendTruncate(int ... deleteOrder) throws IOException {
+    FSDirectory fsDir = cluster.getNamesystem().getFSDirectory();
+    Path parent = new Path("/test");
+    fs.mkdirs(parent);
+    fs.setQuota(parent, 100, 1000);
+    fs.allowSnapshot(parent);
+    String truncateFile = "testSnapshotWithAppendTruncate";
+    final Path src = new Path(parent, truncateFile);
+    int[] length = new int[4];
+    length[0] = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
+    DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
+    Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
+    Path[] snapshotFiles = new Path[4];
+
+    // Diskspace consumed should be 10 bytes * 3. [blk 1,2,3]
+    ContentSummary contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(30L));
+
+    // Add file to snapshot and append
+    String[] ss = new String[] {"ss0", "ss1", "ss2", "ss3"};
+    Path snapshotDir = fs.createSnapshot(parent, ss[0]);
+    snapshotFiles[0] = new Path(snapshotDir, truncateFile);
+    length[1] = length[2] = length[0] + BLOCK_SIZE + 1;
+    DFSTestUtil.appendFile(fs, src, BLOCK_SIZE + 1);
+    Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock()
+                                         .getBlock().getLocalBlock();
+
+    // Diskspace consumed should be 15 bytes * 3. [blk 1,2,3,4]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(45L));
+
+    // Create another snapshot without changes
+    snapshotDir = fs.createSnapshot(parent, ss[1]);
+    snapshotFiles[1] = new Path(snapshotDir, truncateFile);
+
+    // Create another snapshot and append
+    snapshotDir = fs.createSnapshot(parent, ss[2]);
+    snapshotFiles[2] = new Path(snapshotDir, truncateFile);
+    DFSTestUtil.appendFile(fs, src, BLOCK_SIZE -1 + BLOCK_SIZE / 2);
+    Block appendedBlk = getLocatedBlocks(src).getLastLocatedBlock()
+                                             .getBlock().getLocalBlock();
+
+    // Diskspace consumed should be 20 bytes * 3. [blk 1,2,3,4,5]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(60L));
+
+    // Truncate to block boundary
+    int newLength = length[0] + BLOCK_SIZE / 2;
+    boolean isReady = fs.truncate(src, newLength);
+    assertTrue("Recovery is not expected.", isReady);
+    assertFileLength(snapshotFiles[2], length[2]);
+    assertFileLength(snapshotFiles[1], length[1]);
+    assertFileLength(snapshotFiles[0], length[0]);
+    assertBlockNotPresent(appendedBlk);
+
+    // Diskspace consumed should be 16 bytes * 3. [blk 1,2,3 SS:4]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+    // Truncate full block again
+    newLength = length[0] - BLOCK_SIZE / 2;
+    isReady = fs.truncate(src, newLength);
+    assertTrue("Recovery is not expected.", isReady);
+    assertFileLength(snapshotFiles[2], length[2]);
+    assertFileLength(snapshotFiles[1], length[1]);
+    assertFileLength(snapshotFiles[0], length[0]);
+
+    // Diskspace consumed should be 16 bytes * 3. [blk 1,2 SS:3,4]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+    // Truncate half of the last block
+    newLength -= BLOCK_SIZE / 2;
+    isReady = fs.truncate(src, newLength);
+    assertFalse("Recovery is expected.", isReady);
+    checkBlockRecovery(src);
+    assertFileLength(snapshotFiles[2], length[2]);
+    assertFileLength(snapshotFiles[1], length[1]);
+    assertFileLength(snapshotFiles[0], length[0]);
+    Block replacedBlk = getLocatedBlocks(src).getLastLocatedBlock()
+        .getBlock().getLocalBlock();
+
+    // Diskspace consumed should be 18 bytes * 3. [blk 1,6 SS:2,3,4]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(54L));
+
+    snapshotDir = fs.createSnapshot(parent, ss[3]);
+    snapshotFiles[3] = new Path(snapshotDir, truncateFile);
+    length[3] = newLength;
+
+    // Delete file. Should still be able to read snapshots
+    int numINodes = fsDir.getInodeMapSize();
+    isReady = fs.delete(src, false);
+    assertTrue("Delete failed.", isReady);
+    assertFileLength(snapshotFiles[3], length[3]);
+    assertFileLength(snapshotFiles[2], length[2]);
+    assertFileLength(snapshotFiles[1], length[1]);
+    assertFileLength(snapshotFiles[0], length[0]);
+    assertEquals("Number of INodes should not change",
+        numINodes, fsDir.getInodeMapSize());
+
+    fs.deleteSnapshot(parent, ss[3]);
+
+    assertBlockExists(firstBlk);
+    assertBlockExists(lastBlk);
+    assertBlockNotPresent(replacedBlk);
+
+    // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+    // delete snapshots in the specified order
+    fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
+    assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
+    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+    assertBlockExists(firstBlk);
+    assertBlockExists(lastBlk);
+    assertEquals("Number of INodes should not change",
+        numINodes, fsDir.getInodeMapSize());
+
+    // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(48L));
+
+    fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
+    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+    assertBlockExists(firstBlk);
+    contentSummary = fs.getContentSummary(parent);
+    if(fs.exists(snapshotFiles[0])) {
+      // Diskspace consumed should be 12 bytes * 3. [SS:1,2,3]
+      assertBlockNotPresent(lastBlk);
+      assertThat(contentSummary.getSpaceConsumed(), is(36L));
+    } else {
+      // Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
+      assertThat(contentSummary.getSpaceConsumed(), is(48L));
+    }
+    assertEquals("Number of INodes should not change",
+        numINodes, fsDir .getInodeMapSize());
+
+    fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
+    assertBlockNotPresent(firstBlk);
+    assertBlockNotPresent(lastBlk);
+
+    // Diskspace consumed should be 0 bytes * 3. []
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(0L));
+    assertNotEquals("Number of INodes should change",
+        numINodes, fsDir.getInodeMapSize());
+  }
+
+  /**
+   * Create three snapshots with file truncated 3 times.
+   * Delete snapshots in the specified order and verify that
+   * remaining snapshots are still readable.
+   */
+  @Test
+  public void testSnapshotWithTruncates() throws IOException {
+    testSnapshotWithTruncates(0, 1, 2);
+    testSnapshotWithTruncates(0, 2, 1);
+    testSnapshotWithTruncates(1, 0, 2);
+    testSnapshotWithTruncates(1, 2, 0);
+    testSnapshotWithTruncates(2, 0, 1);
+    testSnapshotWithTruncates(2, 1, 0);
+  }
+
+  void testSnapshotWithTruncates(int ... deleteOrder) throws IOException {
+    Path parent = new Path("/test");
+    fs.mkdirs(parent);
+    fs.setQuota(parent, 100, 1000);
+    fs.allowSnapshot(parent);
+    String truncateFile = "testSnapshotWithTruncates";
+    final Path src = new Path(parent, truncateFile);
+    int[] length = new int[3];
+    length[0] = 3 * BLOCK_SIZE;
+    DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
+    Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
+    Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock()
+                                         .getBlock().getLocalBlock();
+    Path[] snapshotFiles = new Path[3];
+
+    // Diskspace consumed should be 12 bytes * 3. [blk 1,2,3]
+    ContentSummary contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(36L));
+
+    // Add file to snapshot and append
+    String[] ss = new String[] {"ss0", "ss1", "ss2"};
+    Path snapshotDir = fs.createSnapshot(parent, ss[0]);
+    snapshotFiles[0] = new Path(snapshotDir, truncateFile);
+    length[1] = 2 * BLOCK_SIZE;
+    boolean isReady = fs.truncate(src, 2 * BLOCK_SIZE);
+    assertTrue("Recovery is not expected.", isReady);
+
+    // Diskspace consumed should be 12 bytes * 3. [blk 1,2 SS:3]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(36L));
+    snapshotDir = fs.createSnapshot(parent, ss[1]);
+    snapshotFiles[1] = new Path(snapshotDir, truncateFile);
+
+    // Create another snapshot with truncate
+    length[2] = BLOCK_SIZE + BLOCK_SIZE / 2;
+    isReady = fs.truncate(src, BLOCK_SIZE + BLOCK_SIZE / 2);
+    assertFalse("Recovery is expected.", isReady);
+    checkBlockRecovery(src);
+    snapshotDir = fs.createSnapshot(parent, ss[2]);
+    snapshotFiles[2] = new Path(snapshotDir, truncateFile);
+    assertFileLength(snapshotFiles[0], length[0]);
+    assertBlockExists(lastBlk);
+
+    // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(42L));
+
+    fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
+    assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
+    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+    assertFileLength(src, length[2]);
+    assertBlockExists(firstBlk);
+
+    contentSummary = fs.getContentSummary(parent);
+    if(fs.exists(snapshotFiles[0])) {
+      // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
+      assertThat(contentSummary.getSpaceConsumed(), is(42L));
+      assertBlockExists(lastBlk);
+    } else {
+      // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
+      assertThat(contentSummary.getSpaceConsumed(), is(30L));
+      assertBlockNotPresent(lastBlk);
+    }
+
+    fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
+    assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
+    assertFileLength(src, length[2]);
+    assertBlockExists(firstBlk);
+
+    contentSummary = fs.getContentSummary(parent);
+    if(fs.exists(snapshotFiles[0])) {
+      // Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
+      assertThat(contentSummary.getSpaceConsumed(), is(42L));
+      assertBlockExists(lastBlk);
+    } else if(fs.exists(snapshotFiles[1])) {
+      // Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
+      assertThat(contentSummary.getSpaceConsumed(), is(30L));
+      assertBlockNotPresent(lastBlk);
+    } else {
+      // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
+      assertThat(contentSummary.getSpaceConsumed(), is(18L));
+      assertBlockNotPresent(lastBlk);
+    }
+
+    fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
+    assertFileLength(src, length[2]);
+    assertBlockExists(firstBlk);
+
+    // Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(18L));
+    assertThat(contentSummary.getLength(), is(6L));
+
+    fs.delete(src, false);
+    assertBlockNotPresent(firstBlk);
+
+    // Diskspace consumed should be 0 bytes * 3. []
+    contentSummary = fs.getContentSummary(parent);
+    assertThat(contentSummary.getSpaceConsumed(), is(0L));
+  }
+
   /**
    * Failure / recovery test for truncate.
    * In this failure the DNs fail to recover the blocks and the NN triggers
@@ -159,8 +449,6 @@ public class TestFileTruncate {
     boolean isReady = fs.truncate(p, newLength);
     assertThat("truncate should have triggered block recovery.",
         isReady, is(false));
-    FileStatus fileStatus = fs.getFileStatus(p);
-    assertThat(fileStatus.getLen(), is((long) newLength));
 
     boolean recoveryTriggered = false;
     for(int i = 0; i < RECOVERY_ATTEMPTS; i++) {
@@ -168,8 +456,6 @@ public class TestFileTruncate {
           NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(),
           p.toUri().getPath());
       if(leaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
-        cluster.startDataNodes(conf, DATANODE_NUM, true,
-            HdfsServerConstants.StartupOption.REGULAR, null);
         recoveryTriggered = true;
         break;
       }
@@ -177,6 +463,9 @@ public class TestFileTruncate {
     }
     assertThat("lease recovery should have occurred in ~" +
         SLEEP * RECOVERY_ATTEMPTS + " ms.", recoveryTriggered, is(true));
+    cluster.startDataNodes(conf, DATANODE_NUM, true,
+        StartupOption.REGULAR, null);
+    cluster.waitActive();
 
     checkBlockRecovery(p);
 
@@ -184,10 +473,10 @@ public class TestFileTruncate {
         .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
             HdfsConstants.LEASE_HARDLIMIT_PERIOD);
 
-    fileStatus = fs.getFileStatus(p);
+    FileStatus fileStatus = fs.getFileStatus(p);
     assertThat(fileStatus.getLen(), is((long) newLength));
 
-    AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
+    checkFullFile(p, newLength, contents);
     fs.delete(p, false);
   }
 
@@ -198,10 +487,9 @@ public class TestFileTruncate {
   public void testTruncateEditLogLoad() throws IOException {
     int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
     int toTruncate = 1;
-
+    final String s = "/testTruncateEditLogLoad";
+    final Path p = new Path(s);
     byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
-
-    final Path p = new Path("/testTruncateEditLogLoad");
     writeContents(contents, startingFileSize, p);
 
     int newLength = startingFileSize - toTruncate;
@@ -209,54 +497,183 @@ public class TestFileTruncate {
     assertThat("truncate should have triggered block recovery.",
         isReady, is(false));
 
-    checkBlockRecovery(p);
-
     cluster.restartNameNode();
 
+    String holder = UserGroupInformation.getCurrentUser().getUserName();
+    cluster.getNamesystem().recoverLease(s, holder, "");
+
+    checkBlockRecovery(p);
+
     FileStatus fileStatus = fs.getFileStatus(p);
     assertThat(fileStatus.getLen(), is((long) newLength));
 
-    AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
+    checkFullFile(p, newLength, contents);
     fs.delete(p, false);
   }
 
   /**
+   * Upgrade, RollBack, and restart test for Truncate.
+   */
+  @Test
+  public void testUpgradeAndRestart() throws IOException {
+    Path parent = new Path("/test");
+    fs.mkdirs(parent);
+    fs.setQuota(parent, 100, 1000);
+    fs.allowSnapshot(parent);
+    String truncateFile = "testUpgrade";
+    final Path p = new Path(parent, truncateFile);
+    int startingFileSize = 2 * BLOCK_SIZE;
+    int toTruncate = 1;
+    byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
+    writeContents(contents, startingFileSize, p);
+
+    Path snapshotDir = fs.createSnapshot(parent, "ss0");
+    Path snapshotFile = new Path(snapshotDir, truncateFile);
+
+    int newLengthBeforeUpgrade = startingFileSize - toTruncate;
+    boolean isReady = fs.truncate(p, newLengthBeforeUpgrade);
+    assertThat("truncate should have triggered block recovery.",
+        isReady, is(false));
+
+    checkBlockRecovery(p);
+
+    checkFullFile(p, newLengthBeforeUpgrade, contents);
+    assertFileLength(snapshotFile, startingFileSize);
+    long totalBlockBefore = cluster.getNamesystem().getBlocksTotal();
+
+    restartCluster(StartupOption.UPGRADE);
+
+    assertThat("SafeMode should be OFF",
+        cluster.getNamesystem().isInSafeMode(), is(false));
+    assertThat("NameNode should be performing upgrade.",
+        cluster.getNamesystem().isUpgradeFinalized(), is(false));
+    FileStatus fileStatus = fs.getFileStatus(p);
+    assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
+
+    int newLengthAfterUpgrade = newLengthBeforeUpgrade - toTruncate;
+    Block oldBlk = getLocatedBlocks(p).getLastLocatedBlock()
+        .getBlock().getLocalBlock();
+    isReady = fs.truncate(p, newLengthAfterUpgrade);
+    assertThat("truncate should have triggered block recovery.",
+        isReady, is(false));
+    fileStatus = fs.getFileStatus(p);
+    assertThat(fileStatus.getLen(), is((long) newLengthAfterUpgrade));
+    assertThat("Should copy on truncate during upgrade",
+        getLocatedBlocks(p).getLastLocatedBlock().getBlock()
+        .getLocalBlock().getBlockId(), is(not(equalTo(oldBlk.getBlockId()))));
+
+    checkBlockRecovery(p);
+
+    checkFullFile(p, newLengthAfterUpgrade, contents);
+    assertThat("Total block count should be unchanged from copy-on-truncate",
+        cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+
+    restartCluster(StartupOption.ROLLBACK);
+
+    assertThat("File does not exist " + p, fs.exists(p), is(true));
+    fileStatus = fs.getFileStatus(p);
+    assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
+    checkFullFile(p, newLengthBeforeUpgrade, contents);
+    assertThat("Total block count should be unchanged from rolling back",
+        cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+
+    restartCluster(StartupOption.REGULAR);
+    assertThat("Total block count should be unchanged from start-up",
+        cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+    checkFullFile(p, newLengthBeforeUpgrade, contents);
+    assertFileLength(snapshotFile, startingFileSize);
+
+    // empty edits and restart
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    cluster.restartNameNode(true);
+    assertThat("Total block count should be unchanged from start-up",
+        cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
+    checkFullFile(p, newLengthBeforeUpgrade, contents);
+    assertFileLength(snapshotFile, startingFileSize);
+
+    fs.deleteSnapshot(parent, "ss0");
+    fs.delete(parent, true);
+    assertThat("File " + p + " shouldn't exist", fs.exists(p), is(false));
+  }
+
+  /**
    * Check truncate recovery.
    */
   @Test
-  public void testTruncateLastBlock() throws IOException {
+  public void testTruncateRecovery() throws IOException {
     FSNamesystem fsn = cluster.getNamesystem();
-
-    String src = "/file";
+    String client = "client";
+    String clientMachine = "clientMachine";
+    Path parent = new Path("/test");
+    String src = "/test/testTruncateRecovery";
     Path srcPath = new Path(src);
 
     byte[] contents = AppendTestUtil.initBuffer(BLOCK_SIZE);
     writeContents(contents, BLOCK_SIZE, srcPath);
 
-    INodeFile inode = fsn.getFSDirectory().getINode(src).asFile();
-    long oldGenstamp = GenerationStamp.LAST_RESERVED_STAMP;
-    DatanodeDescriptor dn = DFSTestUtil.getLocalDatanodeDescriptor();
-    DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo(
-        dn.getDatanodeUuid(), InetAddress.getLocalHost().getHostAddress());
-    dn.isAlive = true;
-
-    BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
-        new Block(0, 1, oldGenstamp), (short) 1,
-        HdfsServerConstants.BlockUCState.BEING_TRUNCATED,
-        new DatanodeStorageInfo[] {storage});
+    INodesInPath iip = fsn.getFSDirectory().getINodesInPath4Write(src, true);
+    INodeFile file = iip.getLastINode().asFile();
+    long initialGenStamp = file.getLastBlock().getGenerationStamp();
+    // Test that prepareFileForTruncate sets up in-place truncate.
+    fsn.writeLock();
+    try {
+      Block oldBlock = file.getLastBlock();
+      Block truncateBlock =
+          fsn.prepareFileForTruncate(iip, client, clientMachine, 1, null);
+      // In-place truncate uses old block id with new genStamp.
+      assertThat(truncateBlock.getBlockId(),
+          is(equalTo(oldBlock.getBlockId())));
+      assertThat(truncateBlock.getNumBytes(),
+          is(oldBlock.getNumBytes()));
+      assertThat(truncateBlock.getGenerationStamp(),
+          is(fsn.getBlockIdManager().getGenerationStampV2()));
+      assertThat(file.getLastBlock().getBlockUCState(),
+          is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
+      long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
+          .getBlockRecoveryId();
+      assertThat(blockRecoveryId, is(initialGenStamp + 1));
+      fsn.getEditLog().logTruncate(
+          src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
+    } finally {
+      fsn.writeUnlock();
+    }
 
-    inode.setBlocks(new BlockInfo[] {blockInfo});
+    // Re-create file and ensure we are ready to copy on truncate
+    writeContents(contents, BLOCK_SIZE, srcPath);
+    fs.allowSnapshot(parent);
+    fs.createSnapshot(parent, "ss0");
+    iip = fsn.getFSDirectory().getINodesInPath(src, true);
+    file = iip.getLastINode().asFile();
+    file.recordModification(iip.getLatestSnapshotId(), true);
+    assertThat(file.isBlockInLatestSnapshot(file.getLastBlock()), is(true));
+    initialGenStamp = file.getLastBlock().getGenerationStamp();
+    // Test that prepareFileForTruncate sets up copy-on-write truncate
     fsn.writeLock();
     try {
-      fsn.initializeBlockRecovery(inode);
-      assertThat(inode.getLastBlock().getBlockUCState(),
-          is(HdfsServerConstants.BlockUCState.BEING_TRUNCATED));
-      long blockRecoveryId = ((BlockInfoUnderConstruction) inode.getLastBlock())
+      Block oldBlock = file.getLastBlock();
+      Block truncateBlock =
+          fsn.prepareFileForTruncate(iip, client, clientMachine, 1, null);
+      // Copy-on-write truncate makes new block with new id and genStamp
+      assertThat(truncateBlock.getBlockId(),
+          is(not(equalTo(oldBlock.getBlockId()))));
+      assertThat(truncateBlock.getNumBytes() < oldBlock.getNumBytes(),
+          is(true));
+      assertThat(truncateBlock.getGenerationStamp(),
+          is(fsn.getBlockIdManager().getGenerationStampV2()));
+      assertThat(file.getLastBlock().getBlockUCState(),
+          is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
+      long blockRecoveryId = ((BlockInfoUnderConstruction) file.getLastBlock())
           .getBlockRecoveryId();
-      assertThat(blockRecoveryId, is(oldGenstamp + 2));
+      assertThat(blockRecoveryId, is(initialGenStamp + 1));
+      fsn.getEditLog().logTruncate(
+          src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
       fsn.writeUnlock();
     }
+    checkBlockRecovery(srcPath);
+    fs.deleteSnapshot(parent, "ss0");
+    fs.delete(parent, true);
   }
 
   static void writeContents(byte[] contents, int fileLength, Path p)
@@ -286,4 +703,38 @@ public class TestFileTruncate {
   static LocatedBlocks getLocatedBlocks(Path src) throws IOException {
     return fs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE);
   }
+
+  static void assertBlockExists(Block blk) {
+    assertNotNull("BlocksMap does not contain block: " + blk,
+        cluster.getNamesystem().getStoredBlock(blk));
+  }
+
+  static void assertBlockNotPresent(Block blk) {
+    assertNull("BlocksMap should not contain block: " + blk,
+        cluster.getNamesystem().getStoredBlock(blk));
+  }
+
+  static void assertFileLength(Path file, long length) throws IOException {
+    byte[] data = DFSTestUtil.readFileBuffer(fs, file);
+    assertEquals("Wrong data size in snapshot.", length, data.length);
+  }
+
+  static void checkFullFile(Path p, int newLength, byte[] contents)
+      throws IOException {
+    AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
+  }
+
+  static void restartCluster(StartupOption o)
+      throws IOException {
+    cluster.shutdown();
+    if(StartupOption.ROLLBACK == o)
+      NameNode.doRollback(conf, false);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
+        .format(false)
+        .nameNodePort(NameNode.DEFAULT_PORT)
+        .startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
+        .dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
+        .build();
+    fs = cluster.getFileSystem();
+  }
 }

