This is an automated email from the ASF dual-hosted git repository.

inigoiri pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 73eccd6d7c57 HDFS-16740. Mini cluster test flakiness (#4835)
73eccd6d7c57 is described below

commit 73eccd6d7c5751dfdb6460a6452683cdea539d2e
Author: Steve Vaughan <em...@stevevaughan.me>
AuthorDate: Tue Oct 10 16:51:46 2023 -0400

    HDFS-16740. Mini cluster test flakiness (#4835)
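
    The recurring change: each test replaces ad-hoc randomized temp paths
    (and the shared default base directory) with a JUnit 4 TemporaryFolder
    rule passed to the mini cluster builders, so every run gets an isolated,
    automatically cleaned base directory. A minimal sketch of the pattern,
    assuming JUnit 4 and the HDFS test APIs touched below (class and test
    names are illustrative, not from this patch):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.hdfs.HdfsConfiguration;
        import org.apache.hadoop.hdfs.MiniDFSCluster;
        import org.junit.Rule;
        import org.junit.Test;
        import org.junit.rules.TemporaryFolder;

        public class ExampleMiniClusterTest {
          // Each test method gets a fresh, auto-deleted directory.
          @Rule
          public TemporaryFolder baseDir = new TemporaryFolder();

          @Test
          public void testAgainstIsolatedCluster() throws Exception {
            Configuration conf = new HdfsConfiguration();
            // The builder overload taking a File roots all cluster state
            // under the rule's directory instead of a shared default path.
            try (MiniDFSCluster cluster =
                new MiniDFSCluster.Builder(conf, baseDir.getRoot())
                    .numDataNodes(1)
                    .build()) {
              cluster.waitActive();
              // ... exercise the cluster; JUnit deletes baseDir afterwards ...
            }
          }
        }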
---
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java    |  12 ++-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java     |   5 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java     |   8 +-
 .../hadoop/hdfs/TestDecommissionWithStriped.java   |  27 +++---
 .../hadoop/hdfs/TestReconstructStripedFile.java    |   8 +-
 .../org/apache/hadoop/hdfs/TestRollingUpgrade.java |  80 +++++++---------
 .../hadoop/hdfs/qjournal/MiniJournalCluster.java   |  11 ++-
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java     |   8 ++
 .../hdfs/server/datanode/TestBPOfferService.java   |  21 ++---
 .../datanode/TestDataNodeRollingUpgrade.java       |   7 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java      | 105 ++++++++-------------
 .../hdfs/server/namenode/TestNameNodeMXBean.java   |  37 ++++----
 .../server/namenode/ha/TestRetryCacheWithHA.java   |   9 +-
 13 files changed, 172 insertions(+), 166 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
index 82a983004dfe..bf25958b69dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -29,6 +30,8 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.util.Lists;
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -64,6 +67,9 @@ public class AdminStatesBaseTest {
 
   final private Random myrand = new Random();
 
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   private HostsFileWriter hostsFileWriter;
   private Configuration conf;
   private MiniDFSCluster cluster = null;
@@ -396,7 +402,7 @@ public class AdminStatesBaseTest {
   protected void startCluster(int numNameNodes, int numDatanodes,
       boolean setupHostsFile, long[] nodesCapacity,
       boolean checkDataNodeHostConfig, boolean federation) throws IOException {
-    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDatanodes);
     if (federation) {
       builder.nnTopology(
@@ -431,7 +437,7 @@ public class AdminStatesBaseTest {
 
 
   protected void startSimpleHACluster(int numDatanodes) throws IOException {
-    cluster = new MiniDFSCluster.Builder(conf)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(
         numDatanodes).build();
     cluster.transitionToActive(0);
@@ -458,6 +464,6 @@ public class AdminStatesBaseTest {
       throws IOException {
     assertTrue(fileSys.exists(name));
     fileSys.delete(name, true);
-    assertTrue(!fileSys.exists(name));
+    assertFalse(fileSys.exists(name));
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index f5638871d404..ca8ae04bbf75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -248,7 +248,10 @@ public class MiniDFSCluster implements AutoCloseable {
             "MiniDFSCluster base directory cannot be null");
       }
       String cdir = conf.get(HDFS_MINIDFS_BASEDIR);
-      if (cdir != null) {
+      // There are tests which restart the server, and we want to allow them to restart with the
+      // same configuration.  Although it is an error if the base directory is already set, we'll
+      // ignore cases where the base directory is the same.
+      if (cdir != null && !cdir.equals(basedir.getAbsolutePath())) {
         throw new IllegalArgumentException(
             "MiniDFSCluster base directory already defined (" + cdir + ")");
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index 2369d42482a4..9d3bdbf4e952 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -86,6 +87,9 @@ public class TestDFSStripedInputStream {
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   public ErasureCodingPolicy getEcPolicy() {
     return StripedFileTestUtil.getDefaultECPolicy();
   }
@@ -110,14 +114,12 @@ public class TestDFSStripedInputStream {
           CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
           NativeRSRawErasureCoderFactory.CODER_NAME);
     }
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
-        GenericTestUtils.getRandomizedTempPath());
     SimulatedFSDataset.setFactory(conf);
     startUp();
   }
 
   private void startUp() throws IOException {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(
         dataBlocks + parityBlocks).build();
     cluster.waitActive();
     for (DataNode dn : cluster.getDataNodes()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index 206f75eae703..1de8fc17ee80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -62,11 +63,12 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.PathUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -90,7 +92,10 @@ public class TestDecommissionWithStriped {
   private Path decommissionDir;
   private Path hostsFile;
   private Path excludeFile;
-  private FileSystem localFileSys;
+  private LocalFileSystem localFileSys;
+
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
 
   private Configuration conf;
   private MiniDFSCluster cluster;
@@ -118,9 +123,9 @@ public class TestDecommissionWithStriped {
     conf = createConfiguration();
     // Set up the hosts/exclude files.
     localFileSys = FileSystem.getLocal(conf);
+    localFileSys.setWorkingDirectory(new Path(baseDir.getRoot().getPath()));
     Path workingDir = localFileSys.getWorkingDirectory();
-    decommissionDir = new Path(workingDir,
-        PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
+    decommissionDir = new Path(workingDir, "work-dir/decommission");
     hostsFile = new Path(decommissionDir, "hosts");
     excludeFile = new Path(decommissionDir, "exclude");
     writeConfigFile(hostsFile, null);
@@ -582,16 +587,14 @@ public class TestDecommissionWithStriped {
       localFileSys.delete(name, true);
     }
 
-    FSDataOutputStream stm = localFileSys.create(name);
-
-    if (nodes != null) {
-      for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
-        String node = it.next();
-        stm.writeBytes(node);
-        stm.writeBytes("\n");
+    try (FSDataOutputStream stm = localFileSys.create(name)) {
+      if (nodes != null) {
+        for (String node: nodes) {
+          stm.writeBytes(node);
+          stm.writeBytes("\n");
+        }
       }
     }
-    stm.close();
   }
 
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index cda714b2cb3f..2f734c62f420 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -41,6 +41,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.erasurecode.ErasureCodingTestHelper;
 import org.apache.hadoop.io.ElasticByteBufferPool;
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -96,6 +98,9 @@ public class TestReconstructStripedFile {
     Any
   }
 
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   private Configuration conf;
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
@@ -150,8 +155,7 @@ public class TestReconstructStripedFile {
         getPendingTimeout());
     conf.setBoolean(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_VALIDATION_KEY,
         isValidationEnabled());
-    File basedir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(dnNum)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(dnNum)
         .build();
     cluster.waitActive();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 495195a0c591..cd05994b5bdc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -86,28 +86,7 @@ public class TestRollingUpgrade {
   }
 
   @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  /**
-   * Create a default HDFS configuration which has test-specific data directories.  This is
-   * intended to protect against interactions between test runs that might corrupt results.  Each
-   * test run's data is automatically cleaned-up by JUnit.
-   *
-   * @return a default configuration with test-specific data directories
-   */
-  public Configuration getHdfsConfiguration() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-
-    // Override the file system locations with test-specific temporary folders
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-        folder.newFolder("dfs/name").toString());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
-        folder.newFolder("dfs/namesecondary").toString());
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
-        folder.newFolder("dfs/data").toString());
-
-    return conf;
-  }
+  public TemporaryFolder baseDir = new TemporaryFolder();
 
   /**
    * Test DFSAdmin Upgrade Command.
@@ -115,8 +94,10 @@ public class TestRollingUpgrade {
   @Test
   public void testDFSAdminRollingUpgradeCommands() throws Exception {
     // start a cluster
-    final Configuration conf = getHdfsConfiguration();
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
+    final Configuration conf = new HdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
+        .numDataNodes(0)
+        .build()) {
       cluster.waitActive();
 
       final Path foo = new Path("/foo");
@@ -197,14 +178,15 @@ public class TestRollingUpgrade {
     LOG.info("nn1Dir=" + nn1Dir);
     LOG.info("nn2Dir=" + nn2Dir);
 
-    final Configuration conf = getHdfsConfiguration();
-    try (MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf).build()) {
+    final Configuration conf = new HdfsConfiguration();
+    try (MiniJournalCluster mjc = new MiniJournalCluster.Builder(conf, baseDir.getRoot())
+        .build()) {
       mjc.waitActive();
       setConf(conf, nn1Dir, mjc);
 
       {
         // Start the cluster once to generate the dfs dirs
-        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
             .numDataNodes(0)
             .manageNameDfsDirs(false)
             .checkExitOnShutdown(false)
@@ -224,7 +206,7 @@ public class TestRollingUpgrade {
             new Path(nn2Dir.getAbsolutePath()), false, conf);
 
         // Start the cluster again
-        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
             .numDataNodes(0)
             .format(false)
             .manageNameDfsDirs(false)
@@ -335,8 +317,10 @@ public class TestRollingUpgrade {
   @Test
   public void testRollback() throws Exception {
     // start a cluster
-    final Configuration conf = getHdfsConfiguration();
-    try (MiniDFSCluster cluster  = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
+    final Configuration conf = new HdfsConfiguration();
+    try (MiniDFSCluster cluster  = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
+        .numDataNodes(1)
+        .build()) {
       cluster.waitActive();
 
       final Path foo = new Path("/foo");
@@ -429,8 +413,10 @@ public class TestRollingUpgrade {
   @Test
   public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
     // start a cluster
-    final Configuration conf = getHdfsConfiguration();
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
+    final Configuration conf = new HdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
+        .numDataNodes(1)
+        .build()) {
       cluster.waitActive();
       final DFSAdmin dfsadmin = new DFSAdmin(conf);
       DataNode dn = cluster.getDataNodes().get(0);
@@ -480,13 +466,14 @@ public class TestRollingUpgrade {
 
   private void testFinalize(int nnCount, boolean skipImageDeltaCheck)
       throws Exception {
-    final Configuration conf = getHdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     MiniQJMHACluster cluster = null;
     final Path foo = new Path("/foo");
     final Path bar = new Path("/bar");
 
     try {
-      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build();
+      cluster = new MiniQJMHACluster.Builder(conf, baseDir.getRoot())
+          .setNumNameNodes(nnCount).build();
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
 
@@ -546,8 +533,10 @@ public class TestRollingUpgrade {
   }
 
   private void testQuery(int nnCount) throws Exception{
-    final Configuration conf = getHdfsConfiguration();
-    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount).build()) {
+    final Configuration conf = new HdfsConfiguration();
+    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf, baseDir.getRoot())
+        .setNumNameNodes(nnCount)
+        .build()) {
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
 
@@ -582,8 +571,10 @@ public class TestRollingUpgrade {
 
   @Test (timeout = 300000)
  public void testQueryAfterRestart() throws IOException, InterruptedException {
-    final Configuration conf = getHdfsConfiguration();
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
+    final Configuration conf = new HdfsConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
+        .numDataNodes(0)
+        .build()) {
       cluster.waitActive();
       DistributedFileSystem dfs = cluster.getFileSystem();
 
@@ -612,14 +603,14 @@ public class TestRollingUpgrade {
 
   @Test(timeout = 60000)
   public void testRollBackImage() throws Exception {
-    final Configuration conf = getHdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 2);
     MiniQJMHACluster cluster = null;
     CheckpointFaultInjector old = CheckpointFaultInjector.getInstance();
     try {
-      cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2).build();
+      cluster = new MiniQJMHACluster.Builder(conf, baseDir.getRoot()).setNumNameNodes(2).build();
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
       dfsCluster.transitionToActive(0);
@@ -657,13 +648,14 @@ public class TestRollingUpgrade {
   }
 
  public void testCheckpoint(int nnCount) throws IOException, InterruptedException {
-    final Configuration conf = getHdfsConfiguration();
+    final Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
 
     final Path foo = new Path("/foo");
 
-    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(nnCount)
+    try (MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf, baseDir.getRoot())
+        .setNumNameNodes(nnCount)
+        .setNumNameNodes(nnCount)
         .build()) {
       MiniDFSCluster dfsCluster = cluster.getDfsCluster();
       dfsCluster.waitActive();
@@ -767,8 +759,8 @@ public class TestRollingUpgrade {
     SecondaryNameNode snn = null;
 
     try {
-      Configuration conf = getHdfsConfiguration();
-      cluster = new MiniDFSCluster.Builder(conf).build();
+      Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build();
       cluster.waitActive();
 
       conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 1c43b39159a9..c5b8b79e3ca8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.qjournal;
 
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.FAKE_NSINFO;
-import static org.junit.Assert.fail;
 
 import java.io.Closeable;
 import java.io.File;
@@ -64,7 +63,12 @@ public final class MiniJournalCluster implements Closeable {
     public Builder(Configuration conf) {
       this.conf = conf;
     }
-    
+
+    public Builder(Configuration conf, File baseDir) {
+      this.conf = conf;
+      baseDir(baseDir.toString());
+    }
+
     public Builder baseDir(String d) {
       this.baseDir = d;
       return this;
@@ -289,7 +293,8 @@ public final class MiniJournalCluster implements Closeable {
           }
         }, 50, 3000);
       } catch (TimeoutException e) {
-        fail("Time out while waiting for journal node " + index + " to 
start.");
+        throw new AssertionError("Time out while waiting for journal node " + 
index +
+            " to start.");
       } catch (InterruptedException ite) {
         LOG.warn("Thread interrupted when waiting for node start", ite);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 0791e0ace1c0..dd0ccd729efb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 
+import java.io.File;
 import java.io.IOException;
 import java.net.BindException;
 import java.net.URI;
@@ -60,6 +61,13 @@ public class MiniQJMHACluster implements AutoCloseable {
       this.dfsBuilder = new MiniDFSCluster.Builder(conf).numDataNodes(0);
     }
 
+    public Builder(Configuration conf, File baseDir) {
+      this.conf = conf;
+      // most QJMHACluster tests don't need DataNodes, so we'll make
+      // this the default
+      this.dfsBuilder = new MiniDFSCluster.Builder(conf, baseDir).numDataNodes(0);
+    }
+
     public MiniDFSCluster.Builder getDfsBuilder() {
       return dfsBuilder;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 9d4b0db0804f..65855427d725 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -89,7 +89,9 @@ import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.After;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
@@ -120,6 +122,9 @@ public class TestBPOfferService {
     GenericTestUtils.setLogLevel(DataNode.LOG, Level.TRACE);
   }
 
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   private DatanodeProtocolClientSideTranslatorPB mockNN1;
   private DatanodeProtocolClientSideTranslatorPB mockNN2;
   private final NNHAStatusHeartbeat[] mockHaStatuses =
@@ -1254,8 +1259,7 @@ public class TestBPOfferService {
   @Test(timeout = 15000)
   public void testCommandProcessingThread() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    try {
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build()) {
       List<DataNode> datanodes = cluster.getDataNodes();
       assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
@@ -1272,19 +1276,14 @@ public class TestBPOfferService {
       // Check new metric result about processedCommandsOp.
       // One command send back to DataNode here is #FinalizeCommand.
       assertCounter("ProcessedCommandsOpNumOps", 1L, mrb);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }
 
   @Test(timeout = 5000)
   public void testCommandProcessingThreadExit() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
-        numDataNodes(1).build();
-    try {
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).
+        numDataNodes(1).build()) {
       List<DataNode> datanodes = cluster.getDataNodes();
       DataNode dataNode = datanodes.get(0);
       List<BPOfferService> allBpOs = dataNode.getAllBpOs();
@@ -1294,10 +1293,6 @@ public class TestBPOfferService {
       // Stop and wait util actor exit.
       actor.stopCommandProcessingThread();
       GenericTestUtils.waitFor(() -> !actor.isAlive(), 100, 3000);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
index ae00dbe6f8cf..fdde4cabe6bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
@@ -27,6 +27,8 @@ import java.util.Random;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -65,6 +67,9 @@ public class TestDataNodeRollingUpgrade {
   private static final long FILE_SIZE = BLOCK_SIZE;
   private static final long SEED = 0x1BADF00DL;
 
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   Configuration conf;
   MiniDFSCluster cluster = null;
   DistributedFileSystem fs = null;
@@ -75,7 +80,7 @@ public class TestDataNodeRollingUpgrade {
   private void startCluster() throws IOException {
     conf = new HdfsConfiguration();
     conf.setInt("dfs.blocksize", 1024*1024);
-    cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
+    cluster = new Builder(conf, baseDir.getRoot()).numDataNodes(REPL_FACTOR).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     nn = cluster.getNameNode(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 7d6e7980be5c..1782a4644bc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CORRUPT_BLOCK_DELETE_IMMEDIATELY_ENABLED;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.MiniDFSCluster.HDFS_MINIDFS_BASEDIR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -123,7 +122,9 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.LoggerFactory;
 
 /**
@@ -176,6 +177,9 @@ public class TestFsck {
     return bStream.toString();
   }
 
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   private MiniDFSCluster cluster = null;
   private Configuration conf = null;
 
@@ -217,8 +221,7 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
@@ -235,7 +238,7 @@ public class TestFsck {
     shutdownCluster();
 
     // restart the cluster; bring up namenode but not the data nodes
-    cluster = new MiniDFSCluster.Builder(conf)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(0).format(false).build();
     outStr = runFsck(conf, 1, true, "/");
     // expect the result is corrupt
@@ -282,8 +285,7 @@ public class TestFsck {
         setNumFiles(20).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
@@ -302,8 +304,7 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
     // Create a cluster with the current user, write some files
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(4).build();
     final MiniDFSCluster c2 = cluster;
     final String dir = "/dfsck";
@@ -350,8 +351,7 @@ public class TestFsck {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
         (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize);
     FileSystem fs = null;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDatanodes).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
@@ -558,8 +558,7 @@ public class TestFsck {
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(4).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
@@ -624,8 +623,7 @@ public class TestFsck {
         setNumFiles(4).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(4).build();
     String topDir = "/srcdat";
     String randomString = "HADOOP  ";
@@ -679,8 +677,7 @@ public class TestFsck {
     final int numAllUnits = dataBlocks + ecPolicy.getNumParityUnits();
     int blockSize = 2 * cellSize;
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(
         numAllUnits + 1).build();
     String topDir = "/myDir";
     cluster.waitActive();
@@ -771,8 +768,7 @@ public class TestFsck {
     String outStr = null;
     short factor = 1;
 
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -844,8 +840,7 @@ public class TestFsck {
     Random random = new Random();
     String outStr = null;
     short factor = 1;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(2).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -918,8 +913,7 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -1065,8 +1059,7 @@ public class TestFsck {
   @Test
   public void testFsckError() throws Exception {
     // bring up a one-node cluster
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -1098,8 +1091,7 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     FileSystem fs = null;
 
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -1158,8 +1150,7 @@ public class TestFsck {
   @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
     // bring up a one-node cluster
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -1203,8 +1194,7 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
     
     // Startup a minicluster
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numReplicas).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1265,8 +1255,7 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
     
     // Startup a minicluster
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1374,8 +1363,7 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
@@ -1402,8 +1390,7 @@ public class TestFsck {
    */
   @Test
   public void testFsckForSnapshotFiles() throws Exception {
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(1)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(1)
         .build();
     String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
         "-files");
@@ -1438,8 +1425,7 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs = null;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -1493,8 +1479,7 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -1578,8 +1563,7 @@ public class TestFsck {
         replFactor);
 
     DistributedFileSystem dfs;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -1785,8 +1769,7 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs = null;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -1899,8 +1882,7 @@ public class TestFsck {
    */
   @Test
   public void testStoragePoliciesCK() throws Exception {
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(3)
         .storageTypes(
             new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
@@ -1943,8 +1925,7 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
@@ -2025,8 +2006,7 @@ public class TestFsck {
         replFactor);
 
     DistributedFileSystem dfs;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -2134,8 +2114,7 @@ public class TestFsck {
     int parityBlocks =
         StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
     int totalSize = dataBlocks + parityBlocks;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(
@@ -2170,7 +2149,7 @@ public class TestFsck {
     shutdownCluster();
 
     // restart the cluster; bring up namenode but not the data nodes
-    cluster = new MiniDFSCluster.Builder(conf)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(0).format(false).build();
     outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
     // expect the result is corrupt
@@ -2202,8 +2181,7 @@ public class TestFsck {
 
     int numFiles = 3;
     int numSnapshots = 0;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build();
     cluster.waitActive();
     hdfs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -2297,8 +2275,7 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replication);
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).build();
     DistributedFileSystem dfs = cluster.getFileSystem();
     cluster.waitActive();
 
@@ -2393,7 +2370,6 @@ public class TestFsck {
     HostsFileWriter hostsFileWriter = new HostsFileWriter();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
-    conf.set(HDFS_MINIDFS_BASEDIR, GenericTestUtils.getRandomizedTempPath());
     if (defineUpgradeDomain) {
       conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
           CombinedHostFileManager.class, HostConfigManager.class);
@@ -2401,7 +2377,7 @@ public class TestFsck {
     }
 
     DistributedFileSystem dfs;
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDN).
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(numDN).
         hosts(hosts).racks(racks).build();
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -2445,8 +2421,7 @@ public class TestFsck {
         StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
     int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
     int totalSize = dataBlocks + parityBlocks;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(
@@ -2517,8 +2492,7 @@ public class TestFsck {
         StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
     int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
     int totalSize = dataBlocks + parityBlocks;
-    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
-    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .numDataNodes(totalSize).build();
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(
@@ -2641,8 +2615,7 @@ public class TestFsck {
   @Test(timeout = 300000)
   public void testFsckCorruptWhenOneReplicaIsCorrupt()
       throws Exception {
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
-        new File(GenericTestUtils.getRandomizedTempPath()))
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(2)
         .build()) {
       cluster.waitActive();
@@ -2671,7 +2644,7 @@ public class TestFsck {
 
   @Test
   public void testFsckNonPrivilegedListCorrupt() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(4).build();
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("systest", new String[]{""});
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index d670025bf506..bc46c4857a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -57,8 +57,10 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.junit.Assert;
+import org.junit.Rule;
 import org.junit.Test;
 import org.eclipse.jetty.util.ajax.JSON;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,6 +99,9 @@ public class TestNameNodeMXBean {
    */
   private static final double DELTA = 0.000001;
 
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   static {
     NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
   }
@@ -112,7 +117,7 @@ public class TestNameNodeMXBean {
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(4).build();
       cluster.waitActive();
 
       // Set upgrade domain on the first DN.
@@ -139,7 +144,7 @@ public class TestNameNodeMXBean {
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       assertEquals(fsn.getClusterId(), clusterId);
       // get attribute "BlockPoolId"
-      String blockpoolId = (String) mbs.getAttribute(mxbeanName, 
+      String blockpoolId = (String) mbs.getAttribute(mxbeanName,
           "BlockPoolId");
       assertEquals(fsn.getBlockPoolId(), blockpoolId);
       // get attribute "Version"
@@ -270,7 +275,7 @@ public class TestNameNodeMXBean {
       assertEquals(0, FileUtil.chmod(
           new File(failedNameDir, "current").getAbsolutePath(), "000"));
       cluster.getNameNodeRpc().rollEditLog();
-      
+
       nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
           "NameDirStatuses"));
       statusMap = (Map<String, Map<String, String>>) 
JSON.parse(nameDirStatuses);
@@ -313,7 +318,7 @@ public class TestNameNodeMXBean {
     hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(3).build();
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -366,7 +371,7 @@ public class TestNameNodeMXBean {
     hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(3).build();
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -469,7 +474,7 @@ public class TestNameNodeMXBean {
     hostsFileWriter.initialize(conf, "temp/TestInServiceNodes");
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(3).build();
       cluster.waitActive();
 
       final FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -568,7 +573,7 @@ public class TestNameNodeMXBean {
     hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(3).build();
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -659,7 +664,7 @@ public class TestNameNodeMXBean {
     final Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(0).build();
       cluster.waitActive();
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanNameFsns = new ObjectName(
@@ -675,7 +680,7 @@ public class TestNameNodeMXBean {
           (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
       ObjectMapper mapper = new ObjectMapper();
       Map<String, Object> map = mapper.readValue(topUsers, Map.class);
-      assertTrue("Could not find map key timestamp", 
+      assertTrue("Could not find map key timestamp",
           map.containsKey("timestamp"));
       assertTrue("Could not find map key windows", map.containsKey("windows"));
       List<Map<String, List<Map<String, Object>>>> windows =
@@ -715,7 +720,7 @@ public class TestNameNodeMXBean {
     conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(0).build();
       cluster.waitActive();
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanNameFsns = new ObjectName(
@@ -744,7 +749,7 @@ public class TestNameNodeMXBean {
     conf.set(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY, "");
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(0).build();
       cluster.waitActive();
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanNameFsns = new ObjectName(
@@ -771,7 +776,7 @@ public class TestNameNodeMXBean {
     final Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(0).build();
       cluster.waitActive();
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanNameFs =
@@ -801,7 +806,7 @@ public class TestNameNodeMXBean {
                 .addNN(
                     new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ports[1])));
 
-        cluster = new MiniDFSCluster.Builder(conf)
+        cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
             .nnTopology(topology).numDataNodes(0)
             .build();
         break;
@@ -863,7 +868,7 @@ public class TestNameNodeMXBean {
       int dataBlocks = defaultPolicy.getNumDataUnits();
       int parityBlocks = defaultPolicy.getNumParityUnits();
       int totalSize = dataBlocks + parityBlocks;
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
           .numDataNodes(totalSize).build();
       fs = cluster.getFileSystem();
 
@@ -903,7 +908,7 @@ public class TestNameNodeMXBean {
           StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
       int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
       int totalSize = dataBlocks + parityBlocks;
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
           .numDataNodes(totalSize).build();
       fs = cluster.getFileSystem();
       fs.enableErasureCodingPolicy(
@@ -1046,7 +1051,7 @@ public class TestNameNodeMXBean {
       int blockSize = stripesPerBlock * cellSize;
       conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
 
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
           .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(1)).
               numDataNodes(totalSize).build();
       cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index e3e934b5e377..8f8dd59a1fbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -38,6 +38,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Rule;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -106,7 +108,10 @@ public class TestRetryCacheWithHA {
       defaultEcPolicy.getNumParityUnits() + 1);
   private static final int CHECKTIMES = 10;
   private static final int ResponseSize = 3;
-  
+
+  @Rule
+  public TemporaryFolder baseDir = new TemporaryFolder();
+
   private MiniDFSCluster cluster;
   private DistributedFileSystem dfs;
   private final Configuration conf = new HdfsConfiguration();
@@ -144,7 +149,7 @@ public class TestRetryCacheWithHA {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, ResponseSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
-    cluster = new MiniDFSCluster.Builder(conf)
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(DataNodes).build();
     cluster.waitActive();
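
    For the HA tests, the same rule flows through the new
    MiniQJMHACluster.Builder(Configuration, File) overload added above; a
    minimal sketch, again assuming JUnit 4 (the test name is illustrative):

        @Rule
        public TemporaryFolder baseDir = new TemporaryFolder();

        @Test
        public void testHAClusterWithIsolatedBaseDir() throws Exception {
          Configuration conf = new HdfsConfiguration();
          // The overload defaults to zero DataNodes, which suits most
          // QJM/HA tests; add DataNodes via getDfsBuilder() if needed.
          try (MiniQJMHACluster qjmhaCluster =
              new MiniQJMHACluster.Builder(conf, baseDir.getRoot())
                  .setNumNameNodes(2)
                  .build()) {
            MiniDFSCluster dfsCluster = qjmhaCluster.getDfsCluster();
            dfsCluster.waitActive();
            dfsCluster.transitionToActive(0);
            // ... run HA assertions against the isolated cluster ...
          }
        }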

