HDFS-9292. Make TestFileCorruption independent of the underlying FsDataset implementation. (lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/399ad009
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/399ad009
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/399ad009

Branch: refs/heads/HDFS-8966
Commit: 399ad009158cbc6aca179396d390fe770801420f
Parents: d8736eb
Author: Lei Xu <l...@apache.org>
Authored: Mon Oct 26 16:08:06 2015 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Oct 26 16:09:22 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../apache/hadoop/hdfs/TestFileCorruption.java  | 65 ++++++++------------
 2 files changed, 30 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
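At a glance, the change swaps the File-based walk of the DataNode storage directories for the dataset-agnostic test APIs, so the test no longer assumes block replicas live as plain files on disk. A minimal sketch of that pattern, assuming a MiniDFSCluster-based test with `cluster`, `bpid`, and a DataNode index `dnIndex` already in scope (only calls that appear in the diff below are used):

  // Enumerate replicas through the DataNode's block reports rather than by
  // listing block files on disk, then delete each replica's data through the
  // FsDatasetTestUtils abstraction.
  DataNode dn = cluster.getDataNodes().get(dnIndex);
  Map<DatanodeStorage, BlockListAsLongs> blockReports =
      dn.getFSDataset().getBlockReports(bpid);
  for (BlockListAsLongs report : blockReports.values()) {
    for (BlockReportReplica replica : report) {
      // Works for any FsDataset implementation, not just the file-backed one.
      cluster.getFsDatasetTestUtils(dnIndex)
          .getMaterializedReplica(new ExtendedBlock(bpid, replica))
          .deleteData();
    }
  }

The enclosing test method would declare `throws Exception`, as in TestFileCorruption, since deleteData() can raise an IOException.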


http://git-wip-us.apache.org/repos/asf/hadoop/blob/399ad009/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e26abcc..f6904c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1581,6 +1581,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8945. Update the description about replica placement in HDFS
     Architecture documentation. (Masatake Iwasaki via wang)
 
+    HDFS-9292. Make TestFileCorruption independent of the underlying FsDataset
+    implementation. (lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/399ad009/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 8e0ffe7..c1a7ebb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -24,20 +24,16 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
-import java.io.File;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
+import java.util.Map;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.DirectoryFileFilter;
-import org.apache.commons.io.filefilter.PrefixFileFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -45,6 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -74,17 +71,17 @@ public class TestFileCorruption {
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
-      File storageDir = cluster.getInstanceStorageDir(2, 0);
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("data directory does not exist", data_dir.exists());
-      Collection<File> blocks = FileUtils.listFiles(data_dir,
-          new PrefixFileFilter(Block.BLOCK_FILE_PREFIX),
-          DirectoryFileFilter.DIRECTORY);
-      assertTrue("Blocks do not exist in data-dir", blocks.size() > 0);
-      for (File block : blocks) {
-        System.out.println("Deliberately removing file " + block.getName());
-        assertTrue("Cannot remove file.", block.delete());
+      DataNode dn = cluster.getDataNodes().get(2);
+      Map<DatanodeStorage, BlockListAsLongs> blockReports =
+          dn.getFSDataset().getBlockReports(bpid);
+      assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
+      for (BlockListAsLongs report : blockReports.values()) {
+        for (BlockReportReplica brr : report) {
+          LOG.info("Deliberately removing block {}", brr.getBlockName());
+          cluster.getFsDatasetTestUtils(2).getMaterializedReplica(
+              new ExtendedBlock(bpid, brr)).deleteData();
+        }
       }
       assertTrue("Corrupted replicas not handled properly.",
                  util.checkFiles(fs, "/srcdat"));
@@ -110,7 +107,7 @@ public class TestFileCorruption {
     // Now attempt to read the file
     DataInputStream dis = fs.open(file, 512);
     try {
-      System.out.println("A ChecksumException is expected to be logged.");
+      LOG.info("A ChecksumException is expected to be logged.");
       dis.readByte();
     } catch (ChecksumException ignore) {
       //expect this exception but let any NPE get thrown
@@ -137,15 +134,7 @@ public class TestFileCorruption {
       
       // get the block
       final String bpid = cluster.getNamesystem().getBlockPoolId();
-      File storageDir = cluster.getInstanceStorageDir(0, 0);
-      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      assertTrue("Data directory does not exist", dataDir.exists());
-      ExtendedBlock blk = getBlock(bpid, dataDir);
-      if (blk == null) {
-        storageDir = cluster.getInstanceStorageDir(0, 1);
-        dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        blk = getBlock(bpid, dataDir);
-      }
+      ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
       assertFalse("Data directory does not contain any blocks or there was an "
           + "IO error", blk==null);
 
@@ -173,20 +162,20 @@ public class TestFileCorruption {
       //clean up
       fs.delete(FILE_PATH, false);
     } finally {
-      if (cluster != null) { cluster.shutdown(); }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
-    
   }
   
-  public static ExtendedBlock getBlock(String bpid, File dataDir) {
-    List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
-    if (metadataFiles == null || metadataFiles.isEmpty()) {
-      return null;
+  private static ExtendedBlock getFirstBlock(DataNode dn, String bpid) {
+    Map<DatanodeStorage, BlockListAsLongs> blockReports =
+        dn.getFSDataset().getBlockReports(bpid);
+    for (BlockListAsLongs blockLongs : blockReports.values()) {
+      for (BlockReportReplica block : blockLongs) {
+        return new ExtendedBlock(bpid, block);
+      }
     }
-    File metadataFile = metadataFiles.get(0);
-    File blockFile = Block.metaToBlockFile(metadataFile);
-    return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
-        blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
+    return null;
   }
-
 }
