Repository: hadoop Updated Branches: refs/heads/branch-2 cc20316b5 -> 9cb6d291e
HDFS-10457. DataNode should not auto-format block pool directory if VERSION is missing. (Wei-Chiu Chuang via lei) (cherry picked from commit bb3bcb9397593fc8a2fa63a48eba126609f72c42) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9cb6d291 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9cb6d291 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9cb6d291 Branch: refs/heads/branch-2 Commit: 9cb6d291ea137a02aa899fe3dd022e96be71f77e Parents: cc20316 Author: Lei Xu <l...@apache.org> Authored: Mon Aug 8 15:54:12 2016 -0700 Committer: Lei Xu <l...@apache.org> Committed: Mon Aug 8 16:19:58 2016 -0700 ---------------------------------------------------------------------- .../server/datanode/BlockPoolSliceStorage.java | 2 +- .../TestDataNodeVolumeFailureReporting.java | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb6d291/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index 90a4669..fd90ae9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -151,7 +151,7 @@ public class BlockPoolSliceStorage extends Storage { throws IOException { StorageDirectory sd = new StorageDirectory(dataDir, null, true); try { - StorageState curState = sd.analyzeStorage(startOpt, this); + StorageState curState = sd.analyzeStorage(startOpt, this, true); // sd is locked but not opened
switch (curState) { case NORMAL: http://git-wip-us.apache.org/repos/asf/hadoop/blob/9cb6d291/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java index c76fa2c..2a2fc4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; +import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; @@ -480,6 +481,25 @@ public class TestDataNodeVolumeFailureReporting { checkFailuresAtNameNode(dm, dns.get(0), false, dn1Vol1.getAbsolutePath()); } + @Test + public void testAutoFormatEmptyBlockPoolDirectory() throws Exception { + // remove the version file + DataNode dn = cluster.getDataNodes().get(0); + String bpid = cluster.getNamesystem().getBlockPoolId(); + BlockPoolSliceStorage bps = dn.getStorage().getBPStorage(bpid); + Storage.StorageDirectory dir = bps.getStorageDir(0); + File current = dir.getCurrentDir(); + + File currentVersion = new File(current, "VERSION");
+ currentVersion.delete(); + // restart the data node + assertTrue(cluster.restartDataNodes(true)); + // the DN should tolerate one volume failure. + cluster.waitActive(); + assertFalse("DataNode should not reformat if VERSION is missing", + currentVersion.exists()); + } + /** * Checks the NameNode for correct values of aggregate counters tracking failed * volumes across all DataNodes. --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org