Repository: hadoop
Updated Branches:
  refs/heads/trunk 2aaed1032 -> 96d307e1e


HDFS-9516. Truncate file fails with data dirs on multiple disks. Contributed by Plamen Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96d307e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96d307e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96d307e1

Branch: refs/heads/trunk
Commit: 96d307e1e320eafb470faf7bd47af3341c399d55
Parents: 2aaed10
Author: Plamen Jeliazkov <plamenj2...@gmail.com>
Authored: Tue Dec 15 00:10:33 2015 -0800
Committer: Konstantin V Shvachko <s...@apache.org>
Committed: Tue Dec 15 00:32:10 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 27 +++++++++++---------
 2 files changed, 18 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
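In short: copyReplicaWithNewBlockId() previously asked volumes.getNextVolume() for a destination, so on a DataNode with multiple data dirs the copied block could land on a different disk than the replica under recovery, while the new ReplicaBeingWritten was still registered against rur.getVolume(). The fix below copies the block within the replica's own volume and asserts that the new block file actually lives under that volume's base path (see the standalone sketch after the diff).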


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96d307e1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 75bb07c..c2f6863 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -903,6 +903,9 @@ Release 2.9.0 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-9516. Truncate file fails with data dirs on multiple disks.
+    (Plamen Jeliazkov via shv)
+
 Release 2.8.0 - UNRELEASED
 
   NEW FEATURES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96d307e1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 74610d7..76e65cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2500,8 +2500,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       } else {
         // Copying block to a new block with new blockId.
         // Not truncating original block.
+        FsVolumeSpi volume = rur.getVolume();
+        String blockPath = blockFile.getAbsolutePath();
+        String volumePath = volume.getBasePath();
+        assert blockPath.startsWith(volumePath) :
+            "New block file: " + blockPath + " must be on " +
+                "same volume as recovery replica: " + volumePath;
         ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
-            newBlockId, recoveryId, rur.getVolume(), blockFile.getParentFile(),
+            newBlockId, recoveryId, volume, blockFile.getParentFile(),
             newlength);
         newReplicaInfo.setNumBytes(newlength);
         volumeMap.add(bpid, newReplicaInfo);
@@ -2517,17 +2523,14 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
       throws IOException {
     String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
-    try (FsVolumeReference ref = volumes.getNextVolume(
-        replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes())) {
-      FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
-      final File tmpDir = v.getBlockPoolSlice(bpid).getTmpDir();
-      final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
-      final File dstBlockFile = new File(destDir, blockFileName);
-      final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
-      return copyBlockFiles(replicaInfo.getMetaFile(),
-          replicaInfo.getBlockFile(),
-          dstMetaFile, dstBlockFile, true, smallBufferSize, conf);
-    }
+    FsVolumeImpl v = (FsVolumeImpl) replicaInfo.getVolume();
+    final File tmpDir = v.getBlockPoolSlice(bpid).getTmpDir();
+    final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
+    final File dstBlockFile = new File(destDir, blockFileName);
+    final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
+    return copyBlockFiles(replicaInfo.getMetaFile(),
+        replicaInfo.getBlockFile(),
+        dstMetaFile, dstBlockFile, true, smallBufferSize, conf);
   }
 
   @Override // FsDatasetSpi

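Below is a minimal, self-contained sketch of the corrected behavior, for illustration only: the destination path is derived from the source replica's own volume root, so the copy never crosses disks the way getNextVolume() could. All names and paths here are hypothetical stand-ins, not the actual FsDatasetImpl APIs; the simulated volume is a temp directory so the example runs anywhere (enable asserts with java -ea).

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    class CopyOnSameVolume {
      // Copy a block file to a tmp dir that lives on the same volume as the
      // source, mirroring the fix: the destination is derived from the
      // source replica's volume instead of a round-robin volume choice.
      static File copyWithNewBlockId(File srcBlock, File volumeTmpDir, long newBlkId)
          throws IOException {
        File dst = new File(volumeTmpDir, "blk_" + newBlkId);
        Files.copy(srcBlock.toPath(), dst.toPath(),
            StandardCopyOption.REPLACE_EXISTING);
        return dst;
      }

      public static void main(String[] args) throws IOException {
        // Simulate one data dir (volume root) with a temp directory.
        File volRoot = Files.createTempDirectory("data1").toFile();
        File src = new File(volRoot, "blk_1000");
        Files.write(src.toPath(), "block data".getBytes());
        File tmp = new File(volRoot, "tmp");
        tmp.mkdirs();
        File dst = copyWithNewBlockId(src, tmp, 1001L);
        // The invariant the new assert in FsDatasetImpl enforces: the copied
        // block file stays under the recovery replica's volume base path.
        assert dst.getAbsolutePath().startsWith(volRoot.getAbsolutePath()) :
            "new block file must be on the same volume as the recovery replica";
        System.out.println("copied to " + dst);
      }
    }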