HDFS-9282. Make data directory count and storage raw capacity related tests FsDataset-agnostic. (Tony Wu via lei)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46e78a7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46e78a7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46e78a7a

Branch: refs/heads/yarn-2877
Commit: 46e78a7a1694407d176218c20cb7438ab1335490
Parents: 482e35c
Author: Lei Xu <l...@apache.org>
Authored: Thu Nov 5 10:45:46 2015 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Nov 5 10:47:33 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |  2 ++
 .../datanode/FsDatasetImplTestUtilsFactory.java |  5 +++++
 .../server/datanode/FsDatasetTestUtils.java     | 19 ++++++++++++++++++
 .../TestDataNodeMultipleRegistrations.java      |  8 ++++++--
 .../fsdataset/impl/FsDatasetImplTestUtils.java  | 21 ++++++++++++++++++++
 .../namenode/TestNamenodeCapacityReport.java    | 14 ++++++-------
 7 files changed, 62 insertions(+), 10 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e78a7a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7246a57..3be67c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1674,6 +1674,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9372. Remove dead code in DataStorage.recoverTransitionRead.
     (Duo Zhang via wheat9)
 
+    HDFS-9282. Make data directory count and storage raw capacity related tests
+    FsDataset-agnostic. (Tony Wu via lei)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e78a7a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index c81f154..6baea25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -192,6 +192,8 @@ public class MiniDFSCluster {
 
     public Builder(Configuration conf) {
       this.conf = conf;
+      this.storagesPerDatanode =
+          FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e78a7a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetImplTestUtilsFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetImplTestUtilsFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetImplTestUtilsFactory.java
index f283f5a..b113b34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetImplTestUtilsFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetImplTestUtilsFactory.java
@@ -29,4 +29,9 @@ public final class FsDatasetImplTestUtilsFactory
   public FsDatasetTestUtils newInstance(DataNode datanode) {
     return new FsDatasetImplTestUtils(datanode);
   }
+
+  @Override
+  public int getDefaultNumOfDataDirs() {
+    return FsDatasetImplTestUtils.DEFAULT_NUM_OF_DATA_DIRS;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e78a7a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 02af467..51cb2bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -67,6 +67,14 @@ public interface FsDatasetTestUtils {
     public boolean isSimulated() {
       return false;
     }
+
+    /**
+     * Get the default number of data directories for underlying storage per
+     * DataNode.
+     *
+     * @return The default number of data dirs per DataNode.
+     */
+    abstract public int getDefaultNumOfDataDirs();
   }
 
   /**
@@ -213,4 +221,15 @@ public interface FsDatasetTestUtils {
    * @return Replica for the block.
    */
   Replica fetchReplica(ExtendedBlock block);
+
+  /**
+   * @return The default value of number of data dirs per DataNode in
+   * MiniDFSCluster.
+   */
+  int getDefaultNumOfDataDirs();
+
+  /**
+   * Obtain the raw capacity of underlying storage per DataNode.
+   */
+  long getRawCapacity() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e78a7a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 55c7b73..8e1e236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -97,7 +97,9 @@ public class TestDataNodeMultipleRegistrations {
       LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
     }
     // number of volumes should be 2 - [data1, data2]
-    assertEquals("number of volumes is wrong", 2, volInfos.size());
+    assertEquals("number of volumes is wrong",
+        cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(),
+        volInfos.size());
 
     for (BPOfferService bpos : dn.getAllBpOs()) {
       LOG.info("BP: " + bpos);
@@ -164,7 +166,9 @@ public class TestDataNodeMultipleRegistrations {
       LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
     }
     // number of volumes should be 2 - [data1, data2]
-    assertEquals("number of volumes is wrong", 2, volInfos.size());
+    assertEquals("number of volumes is wrong",
+        cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(),
+        volInfos.size());
 
     for (BPOfferService bpos : dn.getAllBpOs()) {
       LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e78a7a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index 1ce6b11..0a32102 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -58,6 +59,11 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
   private final FsDatasetImpl dataset;
 
   /**
+   * By default we assume 2 data directories (volumes) per DataNode.
+   */
+  public static final int DEFAULT_NUM_OF_DATA_DIRS = 2;
+
+  /**
    * A reference to the replica that is used to corrupt block / meta later.
    */
   private static class FsDatasetImplMaterializedReplica
@@ -322,4 +328,19 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
   public Replica fetchReplica(ExtendedBlock block) {
     return dataset.fetchReplicaInfo(block.getBlockPoolId(), block.getBlockId());
   }
+
+  @Override
+  public int getDefaultNumOfDataDirs() {
+    return this.DEFAULT_NUM_OF_DATA_DIRS;
+  }
+
+  @Override
+  public long getRawCapacity() throws IOException {
+    try (FsVolumeReferences volRefs = dataset.getFsVolumeReferences()) {
+      Preconditions.checkState(volRefs.size() != 0);
+      DF df = new DF(new File(volRefs.get(0).getBasePath()),
+          dataset.datanode.getConf());
+      return df.getCapacity();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e78a7a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index 0b81435..99323f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -29,7 +28,6 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
@@ -45,6 +43,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.junit.Test;
 
@@ -110,9 +109,7 @@ public class TestNamenodeCapacityReport {
         assertTrue(percentBpUsed == DFSUtilClient.getPercentUsed(bpUsed,
             configCapacity));
       }
-
-      DF df = new DF(new File(cluster.getDataDirectory()), conf);
-
+
       //
      // Currently two data directories are created by the data node
      // in the MiniDFSCluster. This results in each data directory having
@@ -123,9 +120,10 @@ public class TestNamenodeCapacityReport {
      // So multiply the disk capacity and reserved space by two
      // for accommodating it
      //
-      int numOfDataDirs = 2;
-
-      long diskCapacity = numOfDataDirs * df.getCapacity();
+      final FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
+      int numOfDataDirs = utils.getDefaultNumOfDataDirs();
+
+      long diskCapacity = numOfDataDirs * utils.getRawCapacity();
       reserved *= numOfDataDirs;
 
       configCapacity = namesystem.getCapacityTotal();
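
----------------------------------------------------------------------

For context, a minimal sketch of how a test can use the new API to stay
FsDataset-agnostic. The class name and the standalone setup below are
illustrative, not part of the commit; MiniDFSCluster, waitActive(),
getFsDatasetTestUtils(), getDefaultNumOfDataDirs() and getRawCapacity()
are the calls shown in the diff above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;

public class FsDatasetAgnosticCapacityExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // The Builder now seeds storagesPerDatanode from the FsDataset factory,
    // so the data directory count follows the configured dataset type.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
      // Ask the dataset implementation instead of hard-coding "2" volumes.
      int numOfDataDirs = utils.getDefaultNumOfDataDirs();
      // getRawCapacity() reports one volume's capacity; the per-DataNode
      // total scales with the directory count because, per the comment in
      // TestNamenodeCapacityReport, all data dirs share the same disk.
      long diskCapacity = numOfDataDirs * utils.getRawCapacity();
      System.out.println(numOfDataDirs + " data dirs, raw capacity "
          + diskCapacity + " bytes");
    } finally {
      cluster.shutdown();
    }
  }
}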