This is an automated email from the ASF dual-hosted git repository.

hemant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 0008d9ab08 HDDS-9170. Replaced GenericTestUtils#assertExceptionContains with AssertJ#assertThat (#5844)
0008d9ab08 is described below

commit 0008d9ab086428ecb00bf3b080835f8f8a9af9f4
Author: Hemant Kumar <[email protected]>
AuthorDate: Thu Dec 21 22:07:30 2023 -0800

    HDDS-9170. Replaced GenericTestUtils#assertExceptionContains with AssertJ#assertThat (#5844)
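
    This commit replaces the homegrown GenericTestUtils#assertExceptionContains
    helper with JUnit 5's assertThrows plus AssertJ's assertThat(Throwable)
    message assertions across the test suite. A minimal, self-contained sketch
    of the before/after pattern follows; the test class and the doWork() method
    are hypothetical stand-ins for illustration, not code from this patch:

        import static org.assertj.core.api.Assertions.assertThat;
        import static org.junit.jupiter.api.Assertions.assertThrows;

        import java.io.IOException;
        import org.junit.jupiter.api.Test;

        class ExceptionAssertionPatternTest {

          // Hypothetical method under test; always fails, for illustration only.
          private void doWork() throws IOException {
            throw new IOException("Mismatched ClusterIDs: expected A, found B");
          }

          @Test
          void exceptionMessageIsAsserted() {
            // Old pattern (removed by this commit):
            //   try {
            //     doWork();
            //     fail("expected IOException");
            //   } catch (IOException ex) {
            //     GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex);
            //   }
            //
            // New pattern: assertThrows captures the exception, AssertJ asserts on its message.
            IOException ex = assertThrows(IOException.class, this::doWork);
            assertThat(ex).hasMessageContaining("Mismatched ClusterIDs");
          }
        }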
---
 .../hdds/scm/storage/TestChunkInputStream.java     |  13 +-
 .../common/helpers/TestDatanodeVersionFile.java    |  38 ++---
 .../common/impl/TestContainerDataYaml.java         |  49 +++----
 .../container/common/impl/TestContainerSet.java    |  16 +--
 .../TestCloseContainerCommandHandler.java          |  44 +++---
 .../keyvalue/TestKeyValueBlockIterator.java        |  34 ++---
 .../container/keyvalue/TestKeyValueContainer.java  |  59 ++++----
 .../container/keyvalue/TestKeyValueHandler.java    |  19 ++-
 .../keyvalue/impl/CommonChunkManagerTestCases.java |  32 ++---
 .../upgrade/TestDataNodeStartupSlvLessThanMlv.java |  18 +--
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  15 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  | 102 ++++----------
 .../org/apache/ozone/test/GenericTestUtils.java    |  41 ------
 .../fs/ozone/AbstractOzoneFileSystemTest.java      |  26 ++--
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |  13 +-
 .../org/apache/hadoop/ozone/TestBlockTokens.java   |  12 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java       |  23 ++-
 .../rpc/TestOzoneClientMultipartUploadWithFSO.java |  31 ++---
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 155 ++++++++-------------
 .../ozone/om/TestOzoneManagerConfiguration.java    |  19 ++-
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |  15 +-
 .../ozone/om/TestOzoneManagerHAWithAllRunning.java |   9 +-
 .../hadoop/ozone/om/TestOzoneManagerRestart.java   |  22 ++-
 hadoop-ozone/interface-storage/pom.xml             |   6 +-
 .../om/helpers/TestOmMultipartKeyInfoCodec.java    |   6 +-
 .../ozone/om/helpers/TestTransactionInfoCodec.java |  16 +--
 .../hadoop/ozone/om/failover/TestOMFailovers.java  |  32 ++---
 .../request/volume/TestOMVolumeCreateRequest.java  |  14 +-
 .../ozone/s3/TestVirtualHostStyleFilter.java       |  50 ++-----
 29 files changed, 322 insertions(+), 607 deletions(-)

diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
index f45529412f..a5de86a84f 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
@@ -38,16 +38,16 @@ import org.apache.hadoop.ozone.common.Checksum;
 
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.security.token.Token;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
@@ -160,14 +160,9 @@ public class TestChunkInputStream {
   @Test
   public void testSeek() throws Exception {
     seekAndVerify(0);
+    EOFException eofException = assertThrows(EOFException.class, () -> seekAndVerify(CHUNK_SIZE + 1));
+    assertThat(eofException).hasMessage("EOF encountered at pos: " + (CHUNK_SIZE + 1) + " for chunk: " + CHUNK_NAME);
 
-    try {
-      seekAndVerify(CHUNK_SIZE + 1);
-      fail("Seeking to more than the length of Chunk should fail.");
-    } catch (EOFException e) {
-      GenericTestUtils.assertExceptionContains("EOF encountered at pos: "
-          + (CHUNK_SIZE + 1) + " for chunk: " + CHUNK_NAME, e);
-    }
     // Seek before read should update the ChunkInputStream#chunkPosition
     seekAndVerify(25);
     assertEquals(25, chunkStream.getChunkPosition());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
index d9dc7de6d9..90cd925611 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.helpers;
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
 import org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -33,9 +32,10 @@ import java.nio.file.Path;
 import java.util.Properties;
 import java.util.UUID;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * This class tests {@link DatanodeVersionFile}.
@@ -92,15 +92,11 @@ public class TestDatanodeVersionFile {
   }
 
   @Test
-  public void testIncorrectClusterId() throws IOException {
-    try {
-      String randomClusterID = UUID.randomUUID().toString();
-      StorageVolumeUtil.getClusterID(properties, versionFile,
-          randomClusterID);
-      fail("Test failure in testIncorrectClusterId");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex);
-    }
+  public void testIncorrectClusterId() {
+    String randomClusterID = UUID.randomUUID().toString();
+    InconsistentStorageStateException exception = assertThrows(InconsistentStorageStateException.class,
+        () -> StorageVolumeUtil.getClusterID(properties, versionFile, randomClusterID));
+    assertThat(exception).hasMessageContaining("Mismatched ClusterIDs");
   }
 
   @Test
@@ -111,13 +107,9 @@ public class TestDatanodeVersionFile {
     dnVersionFile.createVersionFile(versionFile);
     properties = dnVersionFile.readFrom(versionFile);
 
-    try {
-      StorageVolumeUtil.getCreationTime(properties, versionFile);
-      fail("Test failure in testVerifyCTime");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Invalid Creation time in " +
-          "Version File : " + versionFile, ex);
-    }
+    InconsistentStorageStateException exception = assertThrows(InconsistentStorageStateException.class,
+        () -> StorageVolumeUtil.getCreationTime(properties, versionFile));
+    assertThat(exception).hasMessageContaining("Invalid Creation time in Version File : " + versionFile);
   }
 
   @Test
@@ -127,12 +119,8 @@ public class TestDatanodeVersionFile {
         storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion);
     dnVersionFile.createVersionFile(versionFile);
     Properties props = dnVersionFile.readFrom(versionFile);
-
-    try {
-      StorageVolumeUtil.getLayOutVersion(props, versionFile);
-      fail("Test failure in testVerifyLayOut");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex);
-    }
+    InconsistentStorageStateException exception = assertThrows(InconsistentStorageStateException.class,
+        () -> StorageVolumeUtil.getLayOutVersion(props, versionFile));
+    assertThat(exception).hasMessageContaining("Invalid layOutVersion.");
   }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 4bd2ece41e..786c793b34 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -30,7 +31,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
-import org.apache.ozone.test.GenericTestUtils;
 
 import java.io.File;
 import java.io.IOException;
@@ -39,12 +39,13 @@ import java.time.Instant;
 import java.util.UUID;
 
 import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
-
 /**
  * This class tests create/read .container files.
  */
@@ -188,22 +189,18 @@ public class TestContainerDataYaml {
     cleanup();
   }
 
-
   @ContainerLayoutTestInfo.ContainerTest
-  public void testIncorrectContainerFile(ContainerLayoutVersion layout)
-      throws IOException {
+  public void testIncorrectContainerFile(ContainerLayoutVersion layout) {
     setLayoutVersion(layout);
-    try {
-      String containerFile = "incorrect.container";
-      //Get file from resources folder
-      ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(containerFile).getFile());
-      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-          .readContainerFile(file);
-      fail("testIncorrectContainerFile failed");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("No enum constant", ex);
-    }
+    String containerFile = "incorrect.container";
+
+    // Get file from resource folder
+    ClassLoader classLoader = getClass().getClassLoader();
+    File file = new File(classLoader.getResource(containerFile).getFile());
+    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
+        () -> ContainerDataYaml.readContainerFile(file));
+
+    assertThat(exception).hasMessageContaining("No enum constant");
   }
 
 
@@ -246,26 +243,24 @@ public class TestContainerDataYaml {
   }
 
   /**
-   * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}.
+   * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData,ConfigurationSource)}.
    */
   @ContainerLayoutTestInfo.ContainerTest
-  public void testChecksumInContainerFile(ContainerLayoutVersion layout)
-      throws IOException {
+  public void testChecksumInContainerFile(ContainerLayoutVersion layout) throws IOException {
     setLayoutVersion(layout);
     long containerID = testContainerID++;
 
     File containerFile = createContainerFile(containerID, 0);
 
     // Read from .container file, and verify data.
-    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
+    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml.readContainerFile(containerFile);
     ContainerUtils.verifyChecksum(kvData, conf);
 
     cleanup();
   }
 
   /**
-   * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}.
+   * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData,ConfigurationSource)}.
    */
   @ContainerLayoutTestInfo.ContainerTest
   public void testChecksumInContainerFileWithReplicaIndex(
@@ -297,14 +292,12 @@ public class TestContainerDataYaml {
   @ContainerLayoutTestInfo.ContainerTest
   public void testIncorrectChecksum(ContainerLayoutVersion layout) {
     setLayoutVersion(layout);
-    try {
+    Exception ex = assertThrows(Exception.class, () -> {
       KeyValueContainerData kvData = getKeyValueContainerData();
       ContainerUtils.verifyChecksum(kvData, conf);
-      fail("testIncorrectChecksum failed");
-    } catch (Exception ex) {
-      GenericTestUtils.assertExceptionContains("Container checksum error for " +
-          "ContainerID:", ex);
-    }
+    });
+
+    assertThat(ex).hasMessageStartingWith("Container checksum error for ContainerID:");
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
index 13b8fb6d30..d0d3576c48 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
@@ -20,8 +20,7 @@ package org.apache.hadoop.ozone.container.common.impl;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 
@@ -29,7 +28,6 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.ozone.test.GenericTestUtils;
 import org.mockito.Mockito;
 
 import java.io.IOException;
@@ -43,10 +41,12 @@ import java.util.Random;
 import java.util.UUID;
 import java.util.stream.LongStream;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
@@ -83,13 +83,9 @@ public class TestContainerSet {
     //addContainer
     boolean result = containerSet.addContainer(keyValueContainer);
     assertTrue(result);
-    try {
-      containerSet.addContainer(keyValueContainer);
-      fail("Adding same container ID twice should fail.");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Container already exists with" +
-          " container Id " + containerId, ex);
-    }
+    StorageContainerException exception = assertThrows(StorageContainerException.class,
+        () -> containerSet.addContainer(keyValueContainer));
+    assertThat(exception).hasMessage("Container already exists with container Id " + containerId);
 
     //getContainer
     KeyValueContainer container = (KeyValueContainer) containerSet
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 79107ce111..0c526a2f20 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -35,12 +35,14 @@ import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.ozone.test.GenericTestUtils;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 import java.util.UUID;
 
 import static java.util.Collections.singletonMap;
 import static org.apache.hadoop.ozone.OzoneConsts.GB;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.mock;
@@ -69,7 +71,7 @@ public class TestCloseContainerCommandHandler {
 
   private ContainerLayoutVersion layoutVersion;
 
-  public void initLayoutVerison(ContainerLayoutVersion layout)
+  public void initLayoutVersion(ContainerLayoutVersion layout)
       throws Exception {
     this.layoutVersion = layout;
     init();
@@ -106,7 +108,7 @@ public class TestCloseContainerCommandHandler {
   @ContainerLayoutTestInfo.ContainerTest
   public void closeContainerWithPipeline(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     // close a container that's associated with an existing pipeline
     subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null);
     waitTillFinishExecution(subject);
@@ -121,7 +123,7 @@ public class TestCloseContainerCommandHandler {
   @ContainerLayoutTestInfo.ContainerTest
   public void closeContainerWithoutPipeline(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     // close a container that's NOT associated with an open pipeline
     subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null);
     waitTillFinishExecution(subject);
@@ -139,7 +141,7 @@ public class TestCloseContainerCommandHandler {
   @ContainerLayoutTestInfo.ContainerTest
   public void closeContainerWithForceFlagSet(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     // close a container that's associated with an existing pipeline
     subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null);
     waitTillFinishExecution(subject);
@@ -153,7 +155,7 @@ public class TestCloseContainerCommandHandler {
   @ContainerLayoutTestInfo.ContainerTest
   public void forceCloseQuasiClosedContainer(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     // force-close a container that's already quasi closed
     container.getContainerData()
         .setState(ContainerProtos.ContainerDataProto.State.QUASI_CLOSED);
@@ -170,7 +172,7 @@ public class TestCloseContainerCommandHandler {
   @ContainerLayoutTestInfo.ContainerTest
   public void forceCloseOpenContainer(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     // force-close a container that's NOT associated with an open pipeline
     subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null);
     waitTillFinishExecution(subject);
@@ -186,7 +188,7 @@ public class TestCloseContainerCommandHandler {
   @ContainerLayoutTestInfo.ContainerTest
   public void forceCloseOpenContainerWithPipeline(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     // force-close a container that's associated with an existing pipeline
     subject.handle(forceCloseWithPipeline(), ozoneContainer, context, null);
     waitTillFinishExecution(subject);
@@ -204,7 +206,7 @@ public class TestCloseContainerCommandHandler {
   @ContainerLayoutTestInfo.ContainerTest
   public void closeAlreadyClosedContainer(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     container.getContainerData()
         .setState(ContainerProtos.ContainerDataProto.State.CLOSED);
 
@@ -226,32 +228,24 @@ public class TestCloseContainerCommandHandler {
   }
 
   @ContainerLayoutTestInfo.ContainerTest
-  public void closeNonExistenceContainer(ContainerLayoutVersion layout)
-      throws Exception {
-    initLayoutVerison(layout);
+  public void closeNonExistenceContainer(ContainerLayoutVersion layout) throws Exception {
+    initLayoutVersion(layout);
     long containerID = 1L;
-    try {
-      controller.markContainerForClose(containerID);
-    } catch (IOException e) {
 
-      GenericTestUtils.assertExceptionContains("The Container " +
-          "is not found. ContainerID: " + containerID, e);
-    }
+    IOException ioe = Assertions.assertThrows(IOException.class, () -> controller.markContainerForClose(containerID));
+    assertThat(ioe).hasMessage("The Container is not found. ContainerID: " + containerID);
   }
 
   @ContainerLayoutTestInfo.ContainerTest
   public void closeMissingContainer(ContainerLayoutVersion layout)
       throws Exception {
-    initLayoutVerison(layout);
+    initLayoutVersion(layout);
     long containerID = 2L;
     containerSet.getMissingContainerSet().add(containerID);
-    try {
-      controller.markContainerForClose(containerID);
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("The Container is in " +
-          "the MissingContainerSet hence we can't close it. " +
-          "ContainerID: " + containerID, e);
-    }
+
+    IOException ioe = Assertions.assertThrows(IOException.class, () -> controller.markContainerForClose(containerID));
+    assertThat(ioe)
+        .hasMessage("The Container is in the MissingContainerSet hence we can't close it. ContainerID: " + containerID);
   }
 
   private CloseContainerCommand closeWithKnownPipeline() {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 6e8ad7196d..52316c4326 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -54,8 +54,10 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import org.junit.jupiter.api.AfterEach;
 
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.junit.jupiter.params.ParameterizedTest;
@@ -169,12 +171,8 @@ public class TestKeyValueBlockIterator {
       assertFalse(keyValueBlockIterator.hasNext());
       assertFalse(blockIDIter.hasNext());
 
-      try {
-        keyValueBlockIterator.nextBlock();
-      } catch (NoSuchElementException ex) {
-        GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-            "for ContainerID " + CONTAINER_ID, ex);
-      }
+      NoSuchElementException exception = assertThrows(NoSuchElementException.class, keyValueBlockIterator::nextBlock);
+      assertThat(exception).hasMessage("Block Iterator reached end for ContainerID " + CONTAINER_ID);
     }
   }
 
@@ -192,12 +190,8 @@ public class TestKeyValueBlockIterator {
       assertEquals((long) blockIDs.get(1),
           keyValueBlockIterator.nextBlock().getLocalID());
 
-      try {
-        keyValueBlockIterator.nextBlock();
-      } catch (NoSuchElementException ex) {
-        GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-            "for ContainerID " + CONTAINER_ID, ex);
-      }
+      NoSuchElementException exception = assertThrows(NoSuchElementException.class, keyValueBlockIterator::nextBlock);
+      assertThat(exception).hasMessage("Block Iterator reached end for ContainerID " + CONTAINER_ID);
     }
   }
 
@@ -208,8 +202,7 @@ public class TestKeyValueBlockIterator {
       throws Exception {
     initTest(versionInfo, keySeparator);
     List<Long> blockIDs = createContainerWithBlocks(CONTAINER_ID, 2);
-    try (BlockIterator<BlockData> blockIter =
-             db.getStore().getBlockIterator(CONTAINER_ID)) {
+    try (BlockIterator<BlockData> blockIter = db.getStore().getBlockIterator(CONTAINER_ID)) {
 
       // Even calling multiple times hasNext() should not move entry forward.
       assertTrue(blockIter.hasNext());
@@ -217,8 +210,7 @@ public class TestKeyValueBlockIterator {
       assertTrue(blockIter.hasNext());
       assertTrue(blockIter.hasNext());
       assertTrue(blockIter.hasNext());
-      assertEquals((long) blockIDs.get(0),
-          blockIter.nextBlock().getLocalID());
+      assertEquals((long) blockIDs.get(0), blockIter.nextBlock().getLocalID());
 
       assertTrue(blockIter.hasNext());
       assertTrue(blockIter.hasNext());
@@ -229,14 +221,10 @@ public class TestKeyValueBlockIterator {
 
       blockIter.seekToFirst();
       assertEquals((long) blockIDs.get(0), blockIter.nextBlock().getLocalID());
-      assertEquals((long)blockIDs.get(1), blockIter.nextBlock().getLocalID());
+      assertEquals((long) blockIDs.get(1), blockIter.nextBlock().getLocalID());
 
-      try {
-        blockIter.nextBlock();
-      } catch (NoSuchElementException ex) {
-        GenericTestUtils.assertExceptionContains("Block Iterator reached end " +
-            "for ContainerID " + CONTAINER_ID, ex);
-      }
+      NoSuchElementException exception = assertThrows(NoSuchElementException.class, blockIter::nextBlock);
+      assertThat(exception).hasMessage("Block Iterator reached end for ContainerID " + CONTAINER_ID);
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 9e2e2de6db..a841c0f38f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -25,8 +25,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.utils.db.CodecBuffer;
 import org.apache.hadoop.hdds.utils.db.DBProfile;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
@@ -43,8 +42,7 @@ import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
@@ -92,9 +90,11 @@ import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConf
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;
 import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION;
 import static org.apache.ratis.util.Preconditions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.fail;
 import static org.junit.jupiter.api.Assumptions.assumeTrue;
 import static org.mockito.ArgumentMatchers.anyList;
@@ -540,20 +540,14 @@ public class TestKeyValueContainer {
   }
 
   @ContainerTestVersionInfo.ContainerTest
-  public void testDuplicateContainer(ContainerTestVersionInfo versionInfo)
-      throws Exception {
+  public void testDuplicateContainer(ContainerTestVersionInfo versionInfo) throws Exception {
     init(versionInfo);
-    try {
-      // Create Container.
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      fail("testDuplicateContainer failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("ContainerFile already " +
-          "exists", ex);
-      assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, ex
-          .getResult());
-    }
+
+    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+    StorageContainerException exception = assertThrows(StorageContainerException.class, () ->
+        keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId));
+    assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, exception.getResult());
+    assertThat(exception).hasMessage("Container creation failed because ContainerFile already exists");
   }
 
   @ContainerTestVersionInfo.ContainerTest
@@ -563,14 +557,11 @@ public class TestKeyValueContainer {
     Mockito.reset(volumeChoosingPolicy);
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenThrow(DiskChecker.DiskOutOfSpaceException.class);
-    try {
-      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-      fail("testDiskFullExceptionCreateContainer failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("disk out of space",
-          ex);
-      assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, ex.getResult());
-    }
+
+    StorageContainerException exception = assertThrows(StorageContainerException.class, () ->
+        keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId));
+    assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, exception.getResult());
+    assertThat(exception).hasMessage("Container creation failed, due to disk out of space");
   }
 
   @ContainerTestVersionInfo.ContainerTest
@@ -669,20 +660,20 @@ public class TestKeyValueContainer {
   public void testUpdateContainerUnsupportedRequest(
       ContainerTestVersionInfo versionInfo) throws Exception {
     init(versionInfo);
-    try {
-      closeContainer();
+
+    closeContainer();
+
+    StorageContainerException exception = assertThrows(StorageContainerException.class, () -> {
       keyValueContainer = new KeyValueContainer(keyValueContainerData, CONF);
       keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
       Map<String, String> metadata = new HashMap<>();
       metadata.put(OzoneConsts.VOLUME, OzoneConsts.OZONE);
       keyValueContainer.update(metadata, false);
-      fail("testUpdateContainerUnsupportedRequest failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Updating a closed container " +
-          "without force option is not allowed", ex);
-      assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex
-          .getResult());
-    }
+    });
+
+    assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, exception.getResult());
+    assertThat(exception)
+        .hasMessageStartingWith("Updating a closed container without force option is not allowed. ContainerID: ");
   }
 
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index a970013ef8..c17ce8c7c9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -57,9 +57,11 @@ import org.apache.ozone.test.GenericTestUtils;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 
@@ -277,16 +279,13 @@ public class TestKeyValueHandler {
       //Set a class which is not of sub class of VolumeChoosingPolicy
       conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
           "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher");
-      try {
-        new KeyValueHandler(conf,
-            context.getParent().getDatanodeDetails().getUuidString(),
-            cset, volumeSet, metrics, c -> { });
-      } catch (RuntimeException ex) {
-        GenericTestUtils.assertExceptionContains("class org.apache.hadoop" +
-            ".ozone.container.common.impl.HddsDispatcher not org.apache" +
-            ".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy",
-            ex);
-      }
+      RuntimeException exception = assertThrows(RuntimeException.class,
+          () -> new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet,
+              metrics, c -> { }));
+
+      assertThat(exception).hasMessageEndingWith(
+          "class org.apache.hadoop.ozone.container.common.impl.HddsDispatcher 
" +
+              "not 
org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy");
     } finally {
       volumeSet.shutdown();
       FileUtil.fullyDelete(datanodeDir);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
index ad85970494..6ad6936bcc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.Test;
 
 import java.io.File;
@@ -37,6 +36,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMBINED_STAGE;
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -45,32 +45,20 @@ import static org.junit.jupiter.api.Assertions.fail;
 /**
  * Common test cases for ChunkManager implementation tests.
  */
-public abstract class CommonChunkManagerTestCases
-    extends AbstractTestChunkManager {
+public abstract class CommonChunkManagerTestCases extends AbstractTestChunkManager {
 
   @Test
   public void testWriteChunkIncorrectLength() {
-    // GIVEN
     ChunkManager chunkManager = createTestSubject();
-    try {
-      long randomLength = 200L;
-      BlockID blockID = getBlockID();
-      ChunkInfo chunkInfo = new ChunkInfo(
-          String.format("%d.data.%d", blockID.getLocalID(), 0),
-          0, randomLength);
-
-      chunkManager.writeChunk(getKeyValueContainer(), blockID, chunkInfo,
-          getData(),
-          WRITE_STAGE);
+    long randomLength = 200L;
+    BlockID blockID = getBlockID();
+    ChunkInfo chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, randomLength);
 
-      // THEN
-      fail("testWriteChunkIncorrectLength failed");
-    } catch (StorageContainerException ex) {
-      // As we got an exception, writeBytes should be 0.
-      checkWriteIOStats(0, 0);
-      GenericTestUtils.assertExceptionContains("Unexpected buffer size", ex);
-      assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, ex.getResult());
-    }
+    StorageContainerException exception = assertThrows(StorageContainerException.class,
+        () -> chunkManager.writeChunk(getKeyValueContainer(), blockID, chunkInfo, getData(), WRITE_STAGE));
+    checkWriteIOStats(0, 0);
+    assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, exception.getResult());
+    assertThat(exception).hasMessageStartingWith("Unexpected buffer size");
   }
 
   @Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
index e7d20028a6..e9fef6ecfd 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.container.upgrade;
 
 import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
 import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_LAYOUT_VERSION_DIR;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 import java.io.File;
 import java.io.IOException;
@@ -32,8 +34,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 
@@ -65,18 +65,12 @@ public class TestDataNodeStartupSlvLessThanMlv {
     UpgradeTestUtils.createVersionFile(datanodeSubdir,
         HddsProtos.NodeType.DATANODE, mlv);
 
-    try {
-      new DatanodeStateMachine(getNewDatanodeDetails(), conf);
-      Assertions.fail("Expected IOException due to incorrect MLV on DataNode " +
-          "creation.");
-    } catch (IOException e) {
-      String expectedMessage = String.format("Metadata layout version (%s) > " +
-          "software layout version (%s)", mlv, largestSlv);
-      GenericTestUtils.assertExceptionContains(expectedMessage, e);
-    }
+    IOException ioException = assertThrows(IOException.class,
+        () -> new DatanodeStateMachine(getNewDatanodeDetails(), conf));
+    assertThat(ioException).hasMessageEndingWith(
+        String.format("Metadata layout version (%s) > software layout version (%s)", mlv, largestSlv));
   }
 
-
   private DatanodeDetails getNewDatanodeDetails() {
     DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
         DatanodeDetails.Port.Name.STANDALONE, 0);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 333830a7d7..2f7663dcd4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -42,8 +42,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
@@ -111,9 +110,11 @@ import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND_COUNT_UPDATED;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.NEW_NODE;
 import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.mock;
@@ -1123,13 +1124,11 @@ public class TestSCMNodeManager {
    * @throws IOException
    */
   @Test
-  public void testScmCheckForErrorOnNullDatanodeDetails()
-      throws IOException, AuthenticationException {
+  public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException, AuthenticationException {
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      nodeManager.processHeartbeat(null, null);
-    } catch (NullPointerException npe) {
-      GenericTestUtils.assertExceptionContains("Heartbeat is missing " +
-          "DatanodeDetails.", npe);
+      NullPointerException npe = assertThrows(NullPointerException.class,
+          () -> nodeManager.processHeartbeat(null, null));
+      assertThat(npe).hasMessage("Heartbeat is missing DatanodeDetails.");
     }
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index ad59d323eb..dea95cb826 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -62,12 +62,15 @@ import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
 import org.mockito.Mockito;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
 
 /** Test class for SCMSafeModeManager.
  */
@@ -238,87 +241,30 @@ public class TestSCMSafeModeManager {
     testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0.5);
   }
 
-  @Test
-  public void testFailWithIncorrectValueForHealthyPipelinePercent()
-      throws Exception {
-    try {
-      OzoneConfiguration conf = createConf(100,
-          0.9);
-      MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager =
-          PipelineManagerImpl.newPipelineManager(
-              conf,
-              SCMHAManagerStub.getInstance(true),
-              mockNodeManager,
-              scmMetadataStore.getPipelineTable(),
-              queue,
-              scmContext,
-              serviceManager,
-              Clock.system(ZoneOffset.UTC));
-      scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, null, pipelineManager, queue, serviceManager,
-          scmContext);
-      fail("testFailWithIncorrectValueForHealthyPipelinePercent");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
-          " 1.0", ex);
-    }
-  }
-
-  @Test
-  public void testFailWithIncorrectValueForOneReplicaPipelinePercent()
-      throws Exception {
-    try {
-      OzoneConfiguration conf = createConf(0.9,
-          200);
-      MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager =
-          PipelineManagerImpl.newPipelineManager(
-              conf,
-              SCMHAManagerStub.getInstance(true),
-              mockNodeManager,
-              scmMetadataStore.getPipelineTable(),
-              queue,
-              scmContext,
-              serviceManager,
-              Clock.system(ZoneOffset.UTC));
-      scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, null, pipelineManager, queue, serviceManager,
-          scmContext);
-      fail("testFailWithIncorrectValueForOneReplicaPipelinePercent");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
-          " 1.0", ex);
-    }
-  }
-
-  @Test
-  public void testFailWithIncorrectValueForSafeModePercent() throws Exception {
-    try {
-      OzoneConfiguration conf = createConf(0.9, 0.1);
+  @ParameterizedTest
+  @CsvSource(value = {"100,0.9,false", "0.9,200,false", "0.9,0.1,true"})
+  public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent,
+                                                           double oneReplicaPercent,
+                                                           boolean overrideScmSafeModeThresholdPct) throws Exception {
+    OzoneConfiguration conf = createConf(healthyPercent, oneReplicaPercent);
+    if (overrideScmSafeModeThresholdPct) {
       conf.setDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, -1.0);
-      MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
-      PipelineManager pipelineManager =
-          PipelineManagerImpl.newPipelineManager(
-              conf,
-              SCMHAManagerStub.getInstance(true),
-              mockNodeManager,
-              scmMetadataStore.getPipelineTable(),
-              queue,
-              scmContext,
-              serviceManager,
-              Clock.system(ZoneOffset.UTC));
-      scmSafeModeManager = new SCMSafeModeManager(
-          conf, containers, null, pipelineManager, queue, serviceManager,
-          scmContext);
-      fail("testFailWithIncorrectValueForSafeModePercent");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" +
-          " 1.0", ex);
     }
+    MockNodeManager mockNodeManager = new MockNodeManager(true, 10);
+    PipelineManager pipelineManager = PipelineManagerImpl.newPipelineManager(
+        conf,
+        SCMHAManagerStub.getInstance(true),
+        mockNodeManager,
+        scmMetadataStore.getPipelineTable(),
+        queue,
+        scmContext,
+        serviceManager,
+        Clock.system(ZoneOffset.UTC));
+    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
+        () -> new SCMSafeModeManager(conf, containers, null, pipelineManager, queue, serviceManager, scmContext));
+    assertThat(exception).hasMessageEndingWith("value should be >= 0.0 and <= 1.0");
   }
 
-
   public void testSafeModeExitRuleWithPipelineAvailabilityCheck(
       int containerCount, int nodeCount, int pipelineCount,
       double healthyPipelinePercent, double oneReplicaPercent)
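
Note on the hunk above: three copy-pasted @Test methods for out-of-range
percentage values are folded into a single JUnit 5 @ParameterizedTest driven by
@CsvSource. A minimal sketch of that pattern follows; the validate() method is a
hypothetical stand-in for the SCMSafeModeManager constructor checks, not code
from this patch:

    import static org.assertj.core.api.Assertions.assertThat;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.CsvSource;

    class PercentValidationTest {

      // Hypothetical stand-in for the range checks exercised by the test above.
      private static void validate(double value) {
        if (value < 0.0 || value > 1.0) {
          throw new IllegalArgumentException("value should be >= 0.0 and <= 1.0");
        }
      }

      // Each CSV row becomes one invocation, replacing three near-identical @Test methods.
      @ParameterizedTest
      @CsvSource({"100", "200", "-1.0"})
      void rejectsOutOfRangeValues(double value) {
        IllegalArgumentException ex =
            assertThrows(IllegalArgumentException.class, () -> validate(value));
        assertThat(ex).hasMessageEndingWith("value should be >= 0.0 and <= 1.0");
      }
    }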
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
index 856ac30a32..406a58768a 100644
--- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
+++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java
@@ -22,7 +22,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.OutputStream;
 import java.io.PrintStream;
-import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.io.UnsupportedEncodingException;
 import java.util.List;
@@ -31,7 +30,6 @@ import java.util.concurrent.TimeoutException;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.log4j.Layout;
 import org.apache.log4j.Level;
@@ -146,13 +144,6 @@ public abstract class GenericTestUtils {
         + "-" + randomAlphanumeric(10));
   }
 
-  /**
-   * Assert that a given file exists.
-   */
-  public static void assertExists(File f) {
-    Assertions.assertTrue(f.exists(), "File " + f + " should exist");
-  }
-
   /**
    * Assert that a given dir can be created or it already exists.
    */
@@ -161,38 +152,6 @@ public abstract class GenericTestUtils {
         "Could not create dir " + f + ", nor does it exist");
   }
 
-  public static void assertExceptionContains(String expectedText, Throwable t) {
-    assertExceptionContains(expectedText, t, "");
-  }
-
-  public static void assertExceptionContains(String expectedText, Throwable t,
-      String message) {
-    Assertions.assertNotNull(t, "Null Throwable");
-    String msg = t.toString();
-    if (msg == null) {
-      throw new AssertionError("Null Throwable.toString() value", t);
-    } else if (expectedText != null && !msg.contains(expectedText)) {
-      String prefix = StringUtils.isEmpty(message) ? "" : message + ": ";
-      throw new AssertionError(String
-          .format("%s Expected to find '%s' %s: %s", prefix, expectedText,
-              "but got unexpected exception",
-              stringifyException(t)), t);
-    }
-  }
-
-  /**
-   * Make a string representation of the exception.
-   * @param e The exception to stringify
-   * @return A string with exception name and call stack.
-   */
-  public static String stringifyException(Throwable e) {
-    StringWriter stm = new StringWriter();
-    PrintWriter wrt = new PrintWriter(stm);
-    e.printStackTrace(wrt);
-    wrt.close();
-    return stm.toString();
-  }
-
   /**
    * Wait for the specified test to return true. The test will be performed
    * initially and then every {@code checkEveryMillis} until at least
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index ba55b2afcf..38b0272ab3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -101,6 +101,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -392,8 +393,8 @@ abstract class AbstractOzoneFileSystemTest {
     // Creating a child should not add parent keys to the bucket
     try {
       getKey(parent, true);
-    } catch (IOException ex) {
-      assertKeyNotFoundException(ex);
+    } catch (OMException ome) {
+      assertEquals(KEY_NOT_FOUND, ome.getResult());
     }
 
     // List status on the parent should show the child file
@@ -412,8 +413,8 @@ abstract class AbstractOzoneFileSystemTest {
     // Creating a child should not add parent keys to the bucket
     try {
       getKey(parent, true);
-    } catch (IOException ex) {
-      assertKeyNotFoundException(ex);
+    } catch (OMException ome) {
+      assertEquals(KEY_NOT_FOUND, ome.getResult());
     }
 
     // Delete the child key
@@ -1355,10 +1356,6 @@ abstract class AbstractOzoneFileSystemTest {
         .getBucket(bucketName).getKey(key);
   }
 
-  private void assertKeyNotFoundException(IOException ex) {
-    GenericTestUtils.assertExceptionContains("KEY_NOT_FOUND", ex);
-  }
-
   @Test
   public void testGetDirectoryModificationTime()
       throws IOException, InterruptedException {
@@ -1585,14 +1582,11 @@ abstract class AbstractOzoneFileSystemTest {
       paths.add(keyName + OM_KEY_PREFIX + p.getName());
     }
 
-    // unknown keyname
-    try {
-      new OzonePrefixPathImpl(getVolumeName(), getBucketName(), "invalidKey",
-          cluster.getOzoneManager().getKeyManager());
-      fail("Non-existent key name!");
-    } catch (OMException ome) {
-      assertEquals(OMException.ResultCodes.KEY_NOT_FOUND, ome.getResult());
-    }
+    // unknown keyName
+    OMException ome = assertThrows(OMException.class,
+        () -> new OzonePrefixPathImpl(getVolumeName(), getBucketName(), "invalidKey",
+            cluster.getOzoneManager().getKeyManager()));
+    assertEquals(KEY_NOT_FOUND, ome.getResult());
 
     OzonePrefixPathImpl ozonePrefixPath =
         new OzonePrefixPathImpl(getVolumeName(), getBucketName(), keyName,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 73d1301f0f..c82c521a14 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -350,8 +350,8 @@ public class TestRootedOzoneFileSystem {
     // Creating a child should not add parent keys to the bucket
     try {
       getKey(parent, true);
-    } catch (IOException ex) {
-      assertKeyNotFoundException(ex);
+    } catch (OMException ome) {
+      assertEquals(KEY_NOT_FOUND, ome.getResult());
     }
 
     // List status on the parent should show the child file
@@ -421,10 +421,11 @@ public class TestRootedOzoneFileSystem {
     // Creating a child should not add parent keys to the bucket
     try {
       getKey(parent, true);
-    } catch (IOException ex) {
-      assertKeyNotFoundException(ex);
+    } catch (OMException ome) {
+      assertEquals(KEY_NOT_FOUND, ome.getResult());
     }
 
+
     // Delete the child key
     assertTrue(fs.delete(child, false));
 
@@ -971,10 +972,6 @@ public class TestRootedOzoneFileSystem {
         .getBucket(bucketName).getKey(keyInBucket);
   }
 
-  private void assertKeyNotFoundException(IOException ex) {
-    GenericTestUtils.assertExceptionContains("KEY_NOT_FOUND", ex);
-  }
-
   /**
    * Helper function for testListStatusRootAndVolume*.
    * Each call creates one volume, one bucket under that volume,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
index f410c50c46..e028e6741e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java
@@ -90,8 +90,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRI
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.apache.ozone.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.ozone.test.GenericTestUtils.waitFor;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
@@ -101,8 +101,7 @@ import static org.junit.Assert.assertTrue;
  */
 @InterfaceAudience.Private
 public final class TestBlockTokens {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestBlockTokens.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestBlockTokens.class);
   private static final String TEST_VOLUME = "testvolume";
   private static final String TEST_BUCKET = "testbucket";
   private static final String TEST_FILE = "testfile";
@@ -207,8 +206,7 @@ public final class TestBlockTokens {
     StorageContainerException ex = assertThrows(StorageContainerException.class,
         () -> readDataWithoutRetry(keyInfo));
     assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult());
-    assertExceptionContains(
-        "Token can't be verified due to expired secret key", ex);
+    assertThat(ex).hasMessageContaining("Token can't be verified due to 
expired secret key");
   }
 
   @Test
@@ -254,7 +252,7 @@ public final class TestBlockTokens {
         assertThrows(StorageContainerException.class,
             () -> readDataWithoutRetry(keyInfo));
     assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult());
-    assertExceptionContains("Can't find the signing secret key", ex);
+    assertThat(ex).hasMessageContaining("Can't find the signing secret key");
   }
 
   @Test
@@ -277,7 +275,7 @@ public final class TestBlockTokens {
         assertThrows(StorageContainerException.class,
             () -> readDataWithoutRetry(keyInfo));
     assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult());
-    assertExceptionContains("Invalid token for user", ex);
+    assertThat(ex).hasMessageContaining("Invalid token for user");
   }
 
   private UUID extractSecretKeyId(OmKeyInfo keyInfo) throws IOException {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 717a2fafa1..05b7015423 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -149,6 +149,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_MISMATCH;
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
 
 import org.apache.ozone.test.LambdaTestUtils;
@@ -176,6 +177,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
@@ -838,21 +840,12 @@ final class TestSecureOzoneCluster {
           OmTransportFactory.create(conf, ugiNonAdmin, null),
           RandomStringUtils.randomAscii(5));
 
-      try {
-        omClientNonAdmin.getS3Secret("HADOOP/JOHN");
-        // Expected to fail because current ugi isn't an admin
-        fail("non-admin getS3Secret didn't fail as intended");
-      } catch (IOException ex) {
-        GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex);
-      }
-
-      try {
-        omClientNonAdmin.revokeS3Secret("HADOOP/DOE");
-        // Expected to fail because current ugi isn't an admin
-        fail("non-admin revokeS3Secret didn't fail as intended");
-      } catch (IOException ex) {
-        GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex);
-      }
+      OMException omException = assertThrows(OMException.class,
+          () -> omClientNonAdmin.getS3Secret("HADOOP/JOHN"));
+      assertSame(USER_MISMATCH, omException.getResult());
+      omException = assertThrows(OMException.class,
+          () -> omClientNonAdmin.revokeS3Secret("HADOOP/DOE"));
+      assertSame(USER_MISMATCH, omException.getResult());
 
     } finally {
       if (scm != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 6eaf051ba4..015c57a025 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -60,13 +60,14 @@ import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
 import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
 import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
 
 import java.io.IOException;
 import java.nio.file.Path;
@@ -83,7 +84,9 @@ import java.util.UUID;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
 import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
@@ -732,26 +735,14 @@ public class TestOzoneClientMultipartUploadWithFSO {
 
   }
 
-  @Test
-  public void testListPartsInvalidPartMarker() throws Exception {
-    try {
-      bucket.listParts(keyName, "random", -1, 2);
-      Assertions.fail("Should throw exception as partNumber is an invalid 
number!");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Should be greater than or "
-          + "equal to zero", ex);
-    }
-  }
+  @ParameterizedTest
+  @CsvSource(value = {"-1,2,Should be greater than or equal to zero",
+      "1,-1,Max Parts Should be greater than zero"})
+  public void testListPartsWithInvalidInputs(int partNumberMarker, int maxParts, String expectedErrorMessage) {
+    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
+        () -> bucket.listParts(keyName, "random", partNumberMarker, maxParts));
 
-  @Test
-  public void testListPartsInvalidMaxParts() throws Exception {
-    try {
-      bucket.listParts(keyName, "random", 1, -1);
-      Assertions.fail("Should throw exception as max parts is an invalid 
number!");
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Max Parts Should be greater "
-          + "than zero", ex);
-    }
+    assertThat(exception).hasMessageContaining(expectedErrorMessage);
   }
 
   @Test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 6d19c1ad38..1321fd0e82 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -145,12 +145,14 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentity
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
 
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
@@ -615,11 +617,9 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testDeleteS3NonExistingBucket() {
-    try {
-      store.deleteS3Bucket(UUID.randomUUID().toString());
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("NOT_FOUND", ex);
-    }
+    OMException omException = assertThrows(OMException.class, () -> store.deleteS3Bucket(UUID.randomUUID().toString()));
+    assertSame(ResultCodes.BUCKET_NOT_FOUND, omException.getResult());
+    assertThat(omException).hasMessage("Bucket not found");
   }
 
   @Test
@@ -900,8 +900,7 @@ public abstract class TestOzoneRpcClientAbstract {
   }
 
   @Test
-  public void testDeleteLinkedBucket()
-      throws Exception {
+  public void testDeleteLinkedBucket() throws Exception {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     String linkedBucketName = UUID.randomUUID().toString();
@@ -1040,65 +1039,51 @@ public abstract class TestOzoneRpcClientAbstract {
     int blockSize = (int) ozoneManager.getConfiguration().getStorageSize(
         OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
     int valueLength = value.getBytes(UTF_8).length;
-    int countException = 0;
 
     store.createVolume(volumeName);
     volume = store.getVolume(volumeName);
     volume.createBucket(bucketName);
-    OzoneBucket bucket = volume.getBucket(bucketName);
-    bucket.setQuota(OzoneQuota.parseQuota("1 B", "100"));
-
-    // Test bucket quota.
-    bucketName = UUID.randomUUID().toString();
-    volume.createBucket(bucketName);
-    bucket = volume.getBucket(bucketName);
+    final OzoneBucket bucket = volume.getBucket(bucketName);
     bucket.setQuota(OzoneQuota.parseQuota("1 B", "100"));
     store.getVolume(volumeName).setQuota(
         OzoneQuota.parseQuota(Long.MAX_VALUE + " B", "100"));
 
     // Test bucket quota: write key.
-    // The remaining quota does not satisfy a block size, so the write fails.
-    try {
-      writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength);
-    } catch (IOException ex) {
-      countException++;
-      GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex);
-    }
+    // The remaining quota does not satisfy a block size, so the writing fails.
+
+    OMException omException = assertThrows(OMException.class,
+        () -> writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength));
+    assertSame(ResultCodes.QUOTA_EXCEEDED, omException.getResult());
     // Write failed, bucket usedBytes should be 0
     assertEquals(0L,
         store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
 
     // Test bucket quota: write file.
     // The remaining quota does not satisfy a block size, so the write fails.
-    try {
-      writeFile(bucket, UUID.randomUUID().toString(), ONE, value, 0);
-    } catch (IOException ex) {
-      countException++;
-      GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex);
-    }
+
+    omException = assertThrows(OMException.class,
+        () -> writeFile(bucket, UUID.randomUUID().toString(), ONE, value, 0));
+    assertSame(ResultCodes.QUOTA_EXCEEDED, omException.getResult());
     // Write failed, bucket usedBytes should be 0
-    assertEquals(0L,
-        store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+    assertEquals(0L, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
 
     // Test bucket quota: write large key(with five blocks), the first four
     // blocks will succeed,while the later block will fail.
     bucket.setQuota(OzoneQuota.parseQuota(
         4 * blockSize + " B", "100"));
-    try {
+
+    IOException ioException = assertThrows(IOException.class, () -> {
       String keyName = UUID.randomUUID().toString();
-      OzoneOutputStream out = bucket.createKey(keyName,
-          valueLength, RATIS, ONE, new HashMap<>());
-      for (int i = 0; i <= (4 * blockSize) / value.length(); i++) {
-        out.write(value.getBytes(UTF_8));
+      try (OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS, ONE, new HashMap<>())) {
+        for (int i = 0; i <= (4 * blockSize) / value.length(); i++) {
+          out.write(value.getBytes(UTF_8));
+        }
       }
-      out.close();
-    } catch (IOException ex) {
-      countException++;
-      GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex);
-    }
+    });
+    assertThat(ioException).hasCauseInstanceOf(OMException.class).hasMessageContaining("QUOTA_EXCEEDED");
+
     // AllocateBlock failed, bucket usedBytes should not update.
-    assertEquals(0L,
-        store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
+    assertEquals(0L, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
 
     // Reset bucket quota, the original usedBytes needs to remain the same
     bucket.setQuota(OzoneQuota.parseQuota(
@@ -1106,8 +1091,6 @@ public abstract class TestOzoneRpcClientAbstract {
     assertEquals(0,
         store.getVolume(volumeName).getBucket(bucketName).getUsedBytes());
 
-    assertEquals(3, countException);
-
     // key with 0 bytes, usedBytes should not increase.
     bucket.setQuota(OzoneQuota.parseQuota(
         5 * blockSize + " B", "100"));
@@ -1385,11 +1368,9 @@ public abstract class TestOzoneRpcClientAbstract {
     volume = store.getVolume(volumeName);
     assertEquals(1L, volume.getUsedNamespace());
 
-    try {
-      volume.createBucket(bucketName2);
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex);
-    }
+    OzoneVolume finalVolume = volume;
+    OMException omException = assertThrows(OMException.class, () -> finalVolume.createBucket(bucketName2));
+    assertEquals(ResultCodes.QUOTA_EXCEEDED, omException.getResult());
 
     // test linked bucket
     String targetVolName = UUID.randomUUID().toString();
@@ -1913,14 +1894,13 @@ public abstract class TestOzoneRpcClientAbstract {
 
     // Try reading the key. Since the chunk file is corrupted, it should
     // throw a checksum mismatch exception.
-    try {
+    IOException ioException = assertThrows(IOException.class, () -> {
       try (OzoneInputStream is = bucket.readKey(keyName)) {
         is.read(new byte[100]);
       }
-      fail("Reading corrupted data should fail.");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
-    }
+    });
+
+    assertThat(ioException).hasMessageContaining("Checksum mismatch");
   }
 
   // Make this executed at last, for it has some side effect to other UTs
@@ -2090,14 +2070,15 @@ public abstract class TestOzoneRpcClientAbstract {
       fail("Reading corrupted data should not fail.");
     }
     corruptData(containerList.get(2), key);
-    // Try reading the key. Read will fail here as all the replica are corrupt
-    try (OzoneInputStream is = bucket.readKey(keyName)) {
-      byte[] b = new byte[data.length];
-      is.read(b);
-      fail("Reading corrupted data should fail.");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
-    }
+    // Try reading the key. Read will fail here as all the replicas are corrupt
+
+    IOException ioException = assertThrows(IOException.class, () -> {
+      try (OzoneInputStream is = bucket.readKey(keyName)) {
+        byte[] b = new byte[data.length];
+        is.read(b);
+      }
+    });
+    assertThat(ioException).hasMessageContaining("Checksum mismatch");
   }
 
   private void corruptData(Container container, OzoneKey key)
@@ -3305,46 +3286,22 @@ public abstract class TestOzoneRpcClientAbstract {
 
   }
 
-  @Test
-  public void testListPartsInvalidPartMarker() throws Exception {
-    try {
-      String volumeName = UUID.randomUUID().toString();
-      String bucketName = UUID.randomUUID().toString();
-      String keyName = UUID.randomUUID().toString();
-
-      store.createVolume(volumeName);
-      OzoneVolume volume = store.getVolume(volumeName);
-      volume.createBucket(bucketName);
-      OzoneBucket bucket = volume.getBucket(bucketName);
-
-
-      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-          bucket.listParts(keyName, "random", -1, 2);
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Should be greater than or " +
-          "equal to zero", ex);
-    }
-  }
-
-  @Test
-  public void testListPartsInvalidMaxParts() throws Exception {
-    try {
-      String volumeName = UUID.randomUUID().toString();
-      String bucketName = UUID.randomUUID().toString();
-      String keyName = UUID.randomUUID().toString();
+  @ParameterizedTest
+  @CsvSource(value = {"-1,1,Should be greater than or equal to zero", 
"2,-1,Max Parts Should be greater than zero"})
+  public void testListPartsInvalidInput(int partNumberMarker, int maxParts, 
String exceptedMessage) throws Exception {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
 
-      store.createVolume(volumeName);
-      OzoneVolume volume = store.getVolume(volumeName);
-      volume.createBucket(bucketName);
-      OzoneBucket bucket = volume.getBucket(bucketName);
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
 
+    IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
+        () -> bucket.listParts(keyName, "random", partNumberMarker, maxParts));
 
-      OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
-          bucket.listParts(keyName, "random", 1,  -1);
-    } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Max Parts Should be greater " +
-          "than zero", ex);
-    }
+    assertThat(exception).hasMessageContaining(expectedMessage);
   }
 
   @Test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
index 8f75e56805..89acf321e3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
@@ -44,8 +44,11 @@ import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
 
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
@@ -335,10 +338,9 @@ public class TestOzoneManagerConfiguration {
   /**
    * Test a wrong configuration for OM HA. A configuration with none of the
    * OM addresses matching the local address should throw an error.
-   * @throws Exception
    */
   @Test
-  public void testWrongConfiguration() throws Exception {
+  public void testWrongConfiguration() {
     String omServiceId = "om-service-test1";
 
     String omNode1Id = "omNode1";
@@ -360,14 +362,9 @@ public class TestOzoneManagerConfiguration {
     conf.set(omNode2RpcAddrKey, "125.0.0.2:9862");
     conf.set(omNode3RpcAddrKey, "124.0.0.124:9862");
 
-    try {
-      startCluster();
-      fail("Wrong Configuration. OM initialization should have failed.");
-    } catch (OzoneIllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Configuration has no " +
-          OMConfigKeys.OZONE_OM_ADDRESS_KEY + " address that matches local " +
-          "node's address.", e);
-    }
+    OzoneIllegalArgumentException exception = assertThrows(OzoneIllegalArgumentException.class, this::startCluster);
+    assertThat(exception).hasMessage(
+        "Configuration has no " + OZONE_OM_ADDRESS_KEY + " address that matches local node's address.");
   }
 
   /**
@@ -479,7 +476,7 @@ public class TestOzoneManagerConfiguration {
   }
 
   private String getOMAddrKeyWithSuffix(String serviceId, String nodeId) {
-    return ConfUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
+    return ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY,
         serviceId, nodeId);
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
index 7830d69965..c18d1f8b17 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java
@@ -66,6 +66,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
@@ -336,12 +337,11 @@ public abstract class TestOzoneManagerHA {
         // ConnectException. Otherwise, we would get a RemoteException from the
         // last running OM as it would fail to get a quorum.
         if (e instanceof RemoteException) {
-          GenericTestUtils.assertExceptionContains("OMNotLeaderException", e);
+          assertThat(e).hasMessageContaining("is not the leader");
         } else if (e instanceof ConnectException) {
-          GenericTestUtils.assertExceptionContains("Connection refused", e);
+          assertThat(e).hasMessageContaining("Connection refused");
         } else {
-          GenericTestUtils.assertExceptionContains(
-              "Could not determine or connect to OM Leader", e);
+          assertThat(e).hasMessageContaining("Could not determine or connect 
to OM Leader");
         }
       } else {
         throw e;
@@ -445,12 +445,11 @@ public abstract class TestOzoneManagerHA {
         // ConnectException. Otherwise, we would get a RemoteException from the
         // last running OM as it would fail to get a quorum.
         if (e instanceof RemoteException) {
-          GenericTestUtils.assertExceptionContains("OMNotLeaderException", e);
+          assertThat(e).hasMessageContaining("is not the leader");
         } else if (e instanceof ConnectException) {
-          GenericTestUtils.assertExceptionContains("Connection refused", e);
+          assertThat(e).hasMessageContaining("Connection refused");
         } else {
-          GenericTestUtils.assertExceptionContains(
-              "Could not determine or connect to OM Leader", e);
+          assertThat(e).hasMessageContaining("Could not determine or connect 
to OM Leader");
         }
       } else {
         throw e;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
index 8d933912c5..0a8c5a6786 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.om;
 
+import com.google.protobuf.ServiceException;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.ozone.ClientVersion;
@@ -28,6 +29,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException;
 import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider;
 import org.apache.hadoop.ozone.om.ha.OMProxyInfo;
 import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
@@ -73,6 +75,7 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentity
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;
 import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
 import static org.apache.ratis.metrics.RatisMetrics.RATIS_APPLICATION_NAME_METRICS;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -385,10 +388,10 @@ class TestOzoneManagerHAWithAllRunning extends TestOzoneManagerHA {
 
     OzoneManagerProtocolServerSideTranslatorPB omServerProtocol =
         followerOM.getOmServerProtocol();
-    Exception ex = assertThrows(Exception.class,
+    ServiceException ex = assertThrows(ServiceException.class,
         () -> omServerProtocol.submitRequest(null, writeRequest));
-    GenericTestUtils.assertExceptionContains("Suggested leader is OM:" +
-        leaderOMNodeId + "[" + leaderOMAddress + "]", ex);
+    assertThat(ex).hasCauseInstanceOf(OMNotLeaderException.class)
+        .hasMessageEndingWith("Suggested leader is OM:" + leaderOMNodeId + "[" 
+ leaderOMAddress + "].");
   }
 
   @Test
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
index c95bb1e35f..feccd5b30e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.ozone.test.GenericTestUtils;
 
 import org.apache.commons.lang3.RandomStringUtils;
 import org.junit.jupiter.api.AfterAll;
@@ -49,11 +48,13 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test some client operations after cluster starts. And perform restart and
@@ -122,12 +123,8 @@ public class TestOzoneManagerRestart {
     cluster.restartStorageContainerManager(true);
 
     // After restart, try to create same volume again, it should fail.
-    try {
-      objectStore.createVolume(volumeName);
-      fail("testRestartOM failed");
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("VOLUME_ALREADY_EXISTS", ex);
-    }
+    OMException ome = assertThrows(OMException.class, () -> objectStore.createVolume(volumeName));
+    assertEquals(VOLUME_ALREADY_EXISTS, ome.getResult());
 
     // Get Volume.
     ozoneVolume = objectStore.getVolume(volumeName);
@@ -157,12 +154,8 @@ public class TestOzoneManagerRestart {
     cluster.restartStorageContainerManager(true);
 
     // After restart, try to create same bucket again, it should fail.
-    try {
-      ozoneVolume.createBucket(bucketName);
-      fail("testRestartOMWithBucketOperation failed");
-    } catch (IOException ex) {
-      GenericTestUtils.assertExceptionContains("BUCKET_ALREADY_EXISTS", ex);
-    }
+    OMException ome = assertThrows(OMException.class, () -> ozoneVolume.createBucket(bucketName));
+    assertEquals(BUCKET_ALREADY_EXISTS, ome.getResult());
 
     // Get bucket.
     ozoneBucket = ozoneVolume.getBucket(bucketName);
diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml
index dc6a53c1ad..e3271730c2 100644
--- a/hadoop-ozone/interface-storage/pom.xml
+++ b/hadoop-ozone/interface-storage/pom.xml
@@ -69,7 +69,11 @@
       <artifactId>hdds-test-utils</artifactId>
       <scope>test</scope>
     </dependency>
-
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-hadoop-dependency-test</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
   <build>
     <plugins>
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java
index 68a49b0ce5..31846c44a7 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartKeyInfoCodec.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.Codec;
 import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
@@ -30,6 +29,7 @@ import org.junit.jupiter.api.Test;
 import java.util.UUID;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Test {@link OmMultipartKeyInfo#getCodec()}.
@@ -72,11 +72,9 @@ public class TestOmMultipartKeyInfoCodec
     try {
       codec.fromPersistedFormat("random".getBytes(UTF_8));
     } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("Can't encode the the raw " +
-          "data from the byte array", ex);
+      assertThat(ex).hasMessage("Can't encode the the raw data from the byte 
array");
     } catch (java.io.IOException e) {
       e.printStackTrace();
     }
-
   }
 }
diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java
index 717758c0ac..6f9520de76 100644
--- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestTransactionInfoCodec.java
@@ -20,13 +20,13 @@ package org.apache.hadoop.ozone.om.helpers;
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.Codec;
 import org.apache.hadoop.hdds.utils.db.Proto2CodecTestBase;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.Test;
 
 import java.nio.charset.StandardCharsets;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 /**
  * Test {@link TransactionInfo#getCodec()}.
@@ -52,13 +52,9 @@ public class TestTransactionInfoCodec
   }
 
   @Test
-  public void testInvalidProtocolBuffer() throws Exception {
-    try {
-      getCodec().fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8));
-      fail("testInvalidProtocolBuffer failed");
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains(
-          "Incorrect TransactionInfo value", e);
-    }
+  public void testInvalidProtocolBuffer() {
+    IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
+        () -> getCodec().fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8)));
+    assertThat(ex).hasMessage("Incorrect TransactionInfo value");
   }
 }
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
index 27bed51a14..75c27f70d5 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
@@ -42,6 +42,8 @@ import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
+import static org.assertj.core.api.Assertions.assertThat;
+
 /**
  * Tests OM failover protocols using a Mock Failover provider and a Mock OM
  * Protocol.
@@ -68,26 +70,16 @@ public class TestOMFailovers {
             failoverProxyProvider.getRetryPolicy(
                 OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT));
 
-    try {
-      proxy.submitRequest(null, null);
-      Assertions.fail("Request should fail with AccessControlException");
-    } catch (Exception ex) {
-      Assertions.assertTrue(ex instanceof ServiceException);
-
-      // Request should try all OMs one be one and fail when the last OM also
-      // throws AccessControlException.
-      GenericTestUtils.assertExceptionContains("ServiceException of " +
-          "type class org.apache.hadoop.security.AccessControlException for " +
-          "om3", ex);
-      Assertions.assertTrue(ex.getCause() instanceof AccessControlException);
-
-      Assertions.assertTrue(
-          logCapturer.getOutput().contains(getRetryProxyDebugMsg("om1")));
-      Assertions.assertTrue(
-          logCapturer.getOutput().contains(getRetryProxyDebugMsg("om2")));
-      Assertions.assertTrue(
-          logCapturer.getOutput().contains(getRetryProxyDebugMsg("om3")));
-    }
+    ServiceException serviceException = Assertions.assertThrows(ServiceException.class,
+        () -> proxy.submitRequest(null, null));
+
+    // Request should try all OMs one by one and fail when the last OM also
+    // throws AccessControlException.
+    assertThat(serviceException).hasCauseInstanceOf(AccessControlException.class)
+        .hasMessage("ServiceException of type class org.apache.hadoop.security.AccessControlException for om3");
+    Assertions.assertTrue(logCapturer.getOutput().contains(getRetryProxyDebugMsg("om1")));
+    Assertions.assertTrue(logCapturer.getOutput().contains(getRetryProxyDebugMsg("om2")));
+    Assertions.assertTrue(logCapturer.getOutput().contains(getRetryProxyDebugMsg("om3")));
   }
 
   private String getRetryProxyDebugMsg(String omNodeId) {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
index 2687ade3d3..708565b248 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java
@@ -23,7 +23,6 @@ import java.util.UUID;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
 import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 
@@ -34,6 +33,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.mockito.Mockito.when;
@@ -77,15 +77,11 @@ public class TestOMVolumeCreateRequest extends TestOMVolumeRequest {
       OMClientResponse omClientResponse =
           omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, txLogIndex);
       Assertions.assertTrue(omClientResponse instanceof OMVolumeCreateResponse);
-      OMVolumeCreateResponse respone =
-          (OMVolumeCreateResponse) omClientResponse;
-      Assertions.assertEquals(expectedObjId, respone.getOmVolumeArgs()
-          .getObjectID());
-      Assertions.assertEquals(txLogIndex,
-          respone.getOmVolumeArgs().getUpdateID());
+      OMVolumeCreateResponse response = (OMVolumeCreateResponse) omClientResponse;
+      Assertions.assertEquals(expectedObjId, response.getOmVolumeArgs().getObjectID());
+      Assertions.assertEquals(txLogIndex, response.getOmVolumeArgs().getUpdateID());
     } catch (IllegalArgumentException ex) {
-      GenericTestUtils.assertExceptionContains("should be greater than zero",
-          ex);
+      assertThat(ex).hasMessage("should be greater than zero");
     }
   }
 
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index 81f492c3e4..be9c1f7fd3 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -19,20 +19,22 @@ package org.apache.hadoop.ozone.s3;
 
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.ozone.test.GenericTestUtils;
 import org.glassfish.jersey.internal.PropertiesDelegate;
 import org.glassfish.jersey.server.ContainerRequest;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
 import org.mockito.Mockito;
 
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.SecurityContext;
 import java.net.URI;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * This class test virtual host style mapping conversion to path style.
@@ -199,41 +201,15 @@ public class TestVirtualHostStyleFilter {
 
   }
 
-  @Test
-  public void testVirtualHostStyleWithNoMatchingDomain() throws Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
-    virtualHostStyleFilter.setConfiguration(conf);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        ".myhost:9999", null, null, true);
-    try {
-      virtualHostStyleFilter.filter(containerRequest);
-      fail("testVirtualHostStyleWithNoMatchingDomain");
-    } catch (InvalidRequestException ex) {
-      GenericTestUtils.assertExceptionContains("No matching domain", ex);
-    }
-
-  }
-
-  @Test
-  public void testIncorrectVirtualHostStyle() throws
-      Exception {
-
-    VirtualHostStyleFilter virtualHostStyleFilter =
-        new VirtualHostStyleFilter();
+  @ParameterizedTest
+  @CsvSource(value = {"mybucket.myhost:9999,No matching domain", 
"mybucketlocalhost:9878,invalid format"})
+  public void testVirtualHostStyleWithInvalidInputs(String hostAddress,
+                                                    String expectErrorMessage) 
throws Exception {
+    VirtualHostStyleFilter virtualHostStyleFilter = new 
VirtualHostStyleFilter();
     virtualHostStyleFilter.setConfiguration(conf);
-
-    ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        "localhost:9878", null, null, true);
-    try {
-      virtualHostStyleFilter.filter(containerRequest);
-      fail("testIncorrectVirtualHostStyle failed");
-    } catch (InvalidRequestException ex) {
-      GenericTestUtils.assertExceptionContains("invalid format", ex);
-    }
-
+    ContainerRequest containerRequest = createContainerRequest(hostAddress, null, null, true);
+    InvalidRequestException exception = assertThrows(InvalidRequestException.class,
+        () -> virtualHostStyleFilter.filter(containerRequest));
+    assertThat(exception).hasMessageContaining(expectErrorMessage);
   }
-
 }

