This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new a54d83edc0 HDDS-10277. Remove unnecessary fail() from tests (#6166)
a54d83edc0 is described below
commit a54d83edc0c89fb343f77157229a6405c4d60bd5
Author: Zhaohui Wang <[email protected]>
AuthorDate: Tue Feb 6 16:16:33 2024 +0800
HDDS-10277. Remove unnecessary fail() from tests (#6166)
---
.../hdds/scm/client/TestHddsClientUtils.java | 11 +-
.../hadoop/hdds/protocol/TestDatanodeDetails.java | 9 +-
.../hadoop/ozone/common/TestChunkBuffer.java | 25 +--
.../ozone/container/common/TestContainerCache.java | 9 +-
.../container/common/TestDatanodeStoreCache.java | 9 +-
.../common/impl/TestContainerDataYaml.java | 51 +++---
.../container/keyvalue/TestKeyValueContainer.java | 5 +-
.../certificate/authority/TestDefaultCAServer.java | 10 +-
.../hadoop/hdds/scm/block/TestBlockManager.java | 110 +++++-------
.../apache/hadoop/hdds/scm/ha/TestSCMContext.java | 11 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 17 +-
.../scm/security/TestRootCARotationManager.java | 16 +-
.../hadoop/ozone/client/TestOzoneECClient.java | 15 +-
.../hdds/scm/pipeline/TestPipelineClose.java | 9 +-
.../safemode/TestSCMSafeModeWithPipelineRules.java | 8 +-
.../TestContainerStateMachineIdempotency.java | 56 +++---
.../hadoop/ozone/TestSecureOzoneCluster.java | 14 +-
.../rpc/TestContainerStateMachineFailures.java | 104 ++++-------
.../client/rpc/TestDeleteWithInAdequateDN.java | 16 +-
.../rpc/TestOzoneClientRetriesOnExceptions.java | 112 ++++++------
.../client/rpc/TestOzoneRpcClientAbstract.java | 25 +--
.../client/rpc/TestOzoneRpcClientWithRatis.java | 9 -
.../container/ozoneimpl/TestOzoneContainer.java | 17 +-
.../hadoop/ozone/om/TestOmSnapshotFileSystem.java | 4 +-
.../apache/hadoop/ozone/om/TestScmSafeMode.java | 32 +---
.../ozone/om/service/TestRangerBGSyncService.java | 198 ++++++++-------------
.../hadoop/ozone/om/snapshot/TestOmSnapshot.java | 19 +-
.../om/helpers/TestRepeatedOmKeyInfoCodec.java | 36 ++--
.../hadoop/ozone/om/TestBucketManagerImpl.java | 13 +-
.../org/apache/hadoop/ozone/om/TestOMStorage.java | 17 +-
.../volume/TestOMVolumeCreateResponse.java | 17 +-
.../volume/TestOMVolumeSetOwnerResponse.java | 17 +-
.../volume/TestOMVolumeSetQuotaResponse.java | 16 +-
.../ozone/security/TestOzoneTokenIdentifier.java | 9 +-
.../hadoop/ozone/s3/TestAuthorizationFilter.java | 100 +++++------
35 files changed, 425 insertions(+), 721 deletions(-)
diff --git
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java
index 53e41863a0..29b71e9b18 100644
---
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java
+++
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/client/TestHddsClientUtils.java
@@ -50,7 +50,6 @@ import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This test class verifies the parsing of SCM endpoint config settings. The
@@ -234,7 +233,7 @@ public class TestHddsClientUtils {
}
@Test
- public void testVerifyKeyName() {
+ void testVerifyKeyName() throws IllegalArgumentException {
List<String> invalidNames = new ArrayList<>();
invalidNames.add("#");
invalidNames.add("ab^cd");
@@ -276,13 +275,7 @@ public class TestHddsClientUtils {
validNames.add("dollar$");
for (String name : validNames) {
- try {
- HddsClientUtils.verifyKeyName(name);
- // not throwing up on a valid name. it's working.
- } catch (IllegalArgumentException e) {
- // throwing up on an valid name. it's not working.
- fail("Rejected valid string [" + name + "] as a name");
- }
+ HddsClientUtils.verifyKeyName(name);
}
}
diff --git
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
index 4030f6e46d..b05deaa0d6 100644
---
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
+++
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
@@ -29,7 +29,6 @@ import static
org.apache.hadoop.ozone.ClientVersion.DEFAULT_VERSION;
import static
org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for {@link DatanodeDetails}.
@@ -50,14 +49,10 @@ public class TestDatanodeDetails {
}
public static void assertPorts(HddsProtos.DatanodeDetailsProto dn,
- Set<Port.Name> expectedPorts) {
+ Set<Port.Name> expectedPorts) throws IllegalArgumentException {
assertEquals(expectedPorts.size(), dn.getPortsCount());
for (HddsProtos.Port port : dn.getPortsList()) {
- try {
- assertThat(expectedPorts).contains(Port.Name.valueOf(port.getName()));
- } catch (IllegalArgumentException e) {
- fail("Unknown port: " + port.getName());
- }
+ assertThat(expectedPorts).contains(Port.Name.valueOf(port.getName()));
}
}
diff --git
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
index fa8889cd10..3d6d38f3d3 100644
---
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
+++
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java
@@ -37,7 +37,6 @@ import static
org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test {@link ChunkBuffer} implementations.
@@ -49,7 +48,7 @@ public class TestChunkBuffer {
@Test
@Timeout(1)
- public void testImplWithByteBuffer() {
+ void testImplWithByteBuffer() throws IOException {
runTestImplWithByteBuffer(1);
runTestImplWithByteBuffer(1 << 10);
for (int i = 0; i < 10; i++) {
@@ -57,7 +56,7 @@ public class TestChunkBuffer {
}
}
- private static void runTestImplWithByteBuffer(int n) {
+ private static void runTestImplWithByteBuffer(int n) throws IOException {
final byte[] expected = new byte[n];
ThreadLocalRandom.current().nextBytes(expected);
runTestImpl(expected, 0, ChunkBuffer.allocate(n));
@@ -65,7 +64,7 @@ public class TestChunkBuffer {
@Test
@Timeout(1)
- public void testIncrementalChunkBuffer() {
+ void testIncrementalChunkBuffer() throws IOException {
runTestIncrementalChunkBuffer(1, 1);
runTestIncrementalChunkBuffer(4, 8);
runTestIncrementalChunkBuffer(16, 1 << 10);
@@ -76,7 +75,7 @@ public class TestChunkBuffer {
}
}
- private static void runTestIncrementalChunkBuffer(int increment, int n) {
+ private static void runTestIncrementalChunkBuffer(int increment, int n)
throws IOException {
final byte[] expected = new byte[n];
ThreadLocalRandom.current().nextBytes(expected);
runTestImpl(expected, increment,
@@ -85,7 +84,7 @@ public class TestChunkBuffer {
@Test
@Timeout(1)
- public void testImplWithList() {
+ void testImplWithList() throws IOException {
runTestImplWithList(4, 8);
runTestImplWithList(16, 1 << 10);
for (int i = 0; i < 10; i++) {
@@ -95,7 +94,7 @@ public class TestChunkBuffer {
}
}
- private static void runTestImplWithList(int count, int n) {
+ private static void runTestImplWithList(int count, int n) throws IOException
{
final byte[] expected = new byte[n];
ThreadLocalRandom.current().nextBytes(expected);
@@ -117,7 +116,7 @@ public class TestChunkBuffer {
runTestImpl(expected, -1, impl);
}
- private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) {
+ private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl)
throws IOException {
final int n = expected.length;
System.out.println("n=" + n + ", impl=" + impl);
@@ -207,18 +206,12 @@ public class TestChunkBuffer {
"offset=" + offset + ", length=" + length);
}
- private static void assertWrite(byte[] expected, ChunkBuffer impl) {
+ private static void assertWrite(byte[] expected, ChunkBuffer impl) throws
IOException {
impl.rewind();
assertEquals(0, impl.position());
ByteArrayOutputStream output = new ByteArrayOutputStream(expected.length);
-
- try {
- impl.writeTo(new MockGatheringChannel(Channels.newChannel(output)));
- } catch (IOException e) {
- fail("Unexpected error: " + e);
- }
-
+ impl.writeTo(new MockGatheringChannel(Channels.newChannel(output)));
assertArrayEquals(expected, output.toByteArray());
assertFalse(impl.hasRemaining());
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 208f521ec3..2381209bb6 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -33,7 +33,6 @@ import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@@ -145,7 +144,7 @@ public class TestContainerCache {
}
@Test
- public void testConcurrentDBGet() throws Exception {
+ void testConcurrentDBGet() throws Exception {
File root = new File(testRoot);
root.mkdirs();
root.deleteOnExit();
@@ -172,11 +171,7 @@ public class TestContainerCache {
futureList.add(executorService.submit(task));
futureList.add(executorService.submit(task));
for (Future future: futureList) {
- try {
- future.get();
- } catch (InterruptedException | ExecutionException e) {
- fail("Should get the DB instance");
- }
+ future.get();
}
ReferenceCountedDB db = cache.getDB(1, "RocksDB",
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
index 32b1fc284b..e00df0579d 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
@@ -31,7 +31,6 @@ import java.nio.file.Path;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test DatanodeStoreCache.
@@ -43,7 +42,7 @@ public class TestDatanodeStoreCache {
private OzoneConfiguration conf = new OzoneConfiguration();
@Test
- public void testBasicOperations() throws IOException {
+ void testBasicOperations() throws IOException {
DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
String dbPath1 = Files.createDirectory(folder.resolve("basic1"))
.toFile().toString();
@@ -71,11 +70,7 @@ public class TestDatanodeStoreCache {
assertEquals(1, cache.size());
// test remove non-exist
- try {
- cache.removeDB(dbPath1);
- } catch (Exception e) {
- fail("Should not throw " + e);
- }
+ cache.removeDB(dbPath1);
// test shutdown
cache.shutdownCache();
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index 4ccfb2e35d..ec78398824 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -43,7 +43,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This class tests create/read .container files.
@@ -205,41 +204,35 @@ public class TestContainerDataYaml {
@ContainerLayoutTestInfo.ContainerTest
- public void testCheckBackWardCompatibilityOfContainerFile(
- ContainerLayoutVersion layout) {
+ void testCheckBackWardCompatibilityOfContainerFile(
+ ContainerLayoutVersion layout) throws Exception {
setLayoutVersion(layout);
// This test is for if we upgrade, and then .container files added by new
// server will have new fields added to .container file, after a while we
// decided to rollback. Then older ozone can read .container files
// created or not.
- try {
- String containerFile = "additionalfields.container";
- //Get file from resources folder
- ClassLoader classLoader = getClass().getClassLoader();
- File file = new File(classLoader.getResource(containerFile).getFile());
- KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
- .readContainerFile(file);
- ContainerUtils.verifyChecksum(kvData, conf);
+ String containerFile = "additionalfields.container";
+ //Get file from resources folder
+ ClassLoader classLoader = getClass().getClassLoader();
+ File file = new File(classLoader.getResource(containerFile).getFile());
+ KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
+ .readContainerFile(file);
+ ContainerUtils.verifyChecksum(kvData, conf);
- //Checking the Container file data is consistent or not
- assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
- .getState());
- assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType());
- assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
- .getContainerType());
- assertEquals(9223372036854775807L, kvData.getContainerID());
- assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
- .getChunksPath());
- assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
- .getMetadataPath());
- assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion());
- assertEquals(2, kvData.getMetadata().size());
-
- } catch (Exception ex) {
- ex.printStackTrace();
- fail("testCheckBackWardCompatibilityOfContainerFile failed");
- }
+ //Checking the Container file data is consistent or not
+ assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
+ .getState());
+ assertEquals(CONTAINER_DB_TYPE, kvData.getContainerDBType());
+ assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
+ .getContainerType());
+ assertEquals(9223372036854775807L, kvData.getContainerID());
+ assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
+ .getChunksPath());
+ assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
+ .getMetadataPath());
+ assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion());
+ assertEquals(2, kvData.getMetadata().size());
}
/**
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index b109669546..15d0faefdf 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -53,7 +53,6 @@ import
org.apache.hadoop.ozone.container.metadata.DatanodeStore;
import org.apache.hadoop.ozone.container.replication.CopyContainerCompression;
import org.apache.hadoop.util.DiskChecker;
-import org.assertj.core.api.Fail;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -812,7 +811,7 @@ public class TestKeyValueContainer {
}
@ContainerTestVersionInfo.ContainerTest
- public void testAutoCompactionSmallSstFile(
+ void testAutoCompactionSmallSstFile(
ContainerTestVersionInfo versionInfo) throws Exception {
init(versionInfo);
assumeTrue(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3));
@@ -903,8 +902,6 @@ public class TestKeyValueContainer {
List<LiveFileMetaData> fileMetaDataList2 =
((RDBStore)(dnStore.getStore())).getDb().getLiveFilesMetaData();
assertThat(fileMetaDataList2.size()).isLessThan(fileMetaDataList1.size());
- } catch (Exception e) {
- Fail.fail("TestAutoCompactionSmallSstFile failed");
} finally {
// clean up
for (KeyValueContainer c : containerList) {
diff --git
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
index cb4f82443c..f98ad41e8d 100644
---
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
+++
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java
@@ -78,7 +78,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests the Default CA Server.
@@ -449,7 +448,7 @@ public class TestDefaultCAServer {
}
@Test
- public void testIntermediaryCA() throws Exception {
+ void testIntermediaryCA() throws Exception {
conf.set(HddsConfigKeys.HDDS_X509_MAX_DURATION, "P3650D");
securityConfig = new SecurityConfig(conf);
@@ -519,11 +518,8 @@ public class TestDefaultCAServer {
clusterId, scmId, caStore, new DefaultProfile(),
scmCertificateClient.getComponentName());
- try {
- scmCA.init(securityConfig, CAType.SUBORDINATE);
- } catch (Exception e) {
- fail("testIntermediaryCA failed during init");
- }
+
+ scmCA.init(securityConfig, CAType.SUBORDINATE);
}
}
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index 6352cbf197..6438b6f8d4 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -83,7 +83,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
@@ -248,7 +247,7 @@ public class TestBlockManager {
}
@Test
- public void testAllocateBlockInParallel() {
+ void testAllocateBlockInParallel() throws Exception {
int threadCount = 20;
List<ExecutorService> executors = new ArrayList<>(threadCount);
for (int i = 0; i < threadCount; i++) {
@@ -272,17 +271,14 @@ public class TestBlockManager {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture
- .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
- .get();
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+
+ CompletableFuture
+ .allOf(futureList.toArray(new CompletableFuture[futureList.size()]))
+ .get();
}
@Test
- public void testBlockDistribution() throws Exception {
+ void testBlockDistribution() throws Exception {
int threadCount = numContainerPerOwnerInPipeline *
numContainerPerOwnerInPipeline;
nodeManager.setNumPipelinePerDatanode(1);
@@ -322,24 +318,19 @@ public class TestBlockManager {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture.allOf(futureList.toArray(
- new CompletableFuture[0])).get();
-
- assertEquals(1, pipelineManager.getPipelines(replicationConfig).size());
- assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.size());
- assertEquals(numContainerPerOwnerInPipeline,
allocatedBlockMap.values().size());
- allocatedBlockMap.values().forEach(v -> {
- assertEquals(numContainerPerOwnerInPipeline, v.size());
- });
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+ CompletableFuture.allOf(futureList.toArray(new
CompletableFuture[0])).get();
+
+ assertEquals(1, pipelineManager.getPipelines(replicationConfig).size());
+ assertEquals(numContainerPerOwnerInPipeline, allocatedBlockMap.size());
+ assertEquals(numContainerPerOwnerInPipeline,
allocatedBlockMap.values().size());
+ allocatedBlockMap.values().forEach(v -> {
+ assertEquals(numContainerPerOwnerInPipeline, v.size());
+ });
}
@Test
- public void testBlockDistributionWithMultipleDisks() throws Exception {
+ void testBlockDistributionWithMultipleDisks() throws Exception {
int threadCount = numContainerPerOwnerInPipeline *
numContainerPerOwnerInPipeline;
nodeManager.setNumHealthyVolumes(numContainerPerOwnerInPipeline);
@@ -380,30 +371,26 @@ public class TestBlockManager {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture
- .allOf(futureList.toArray(
- new CompletableFuture[futureList.size()])).get();
- assertEquals(1,
- pipelineManager.getPipelines(replicationConfig).size());
- Pipeline pipeline =
- pipelineManager.getPipelines(replicationConfig).get(0);
- // total no of containers to be created will be number of healthy
- // volumes * number of numContainerPerOwnerInPipeline which is equal to
- // the thread count
- assertEquals(threadCount,
pipelineManager.getNumberOfContainers(pipeline.getId()));
- assertEquals(threadCount, allocatedBlockMap.size());
- assertEquals(threadCount, allocatedBlockMap.values().size());
- allocatedBlockMap.values().forEach(v -> {
- assertEquals(1, v.size());
- });
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+ CompletableFuture
+ .allOf(futureList.toArray(
+ new CompletableFuture[futureList.size()])).get();
+ assertEquals(1,
+ pipelineManager.getPipelines(replicationConfig).size());
+ Pipeline pipeline =
+ pipelineManager.getPipelines(replicationConfig).get(0);
+ // total no of containers to be created will be number of healthy
+ // volumes * number of numContainerPerOwnerInPipeline which is equal to
+ // the thread count
+ assertEquals(threadCount,
pipelineManager.getNumberOfContainers(pipeline.getId()));
+ assertEquals(threadCount, allocatedBlockMap.size());
+ assertEquals(threadCount, allocatedBlockMap.values().size());
+ allocatedBlockMap.values().forEach(v -> {
+ assertEquals(1, v.size());
+ });
}
@Test
- public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception
{
+ void testBlockDistributionWithMultipleRaftLogDisks() throws Exception {
int threadCount = numContainerPerOwnerInPipeline *
numContainerPerOwnerInPipeline;
int numMetaDataVolumes = 2;
@@ -445,25 +432,20 @@ public class TestBlockManager {
}, executors.get(i));
futureList.add(future);
}
- try {
- CompletableFuture
- .allOf(futureList.toArray(
- new CompletableFuture[futureList.size()])).get();
- assertEquals(1,
- pipelineManager.getPipelines(replicationConfig).size());
- Pipeline pipeline =
- pipelineManager.getPipelines(replicationConfig).get(0);
- // the pipeline per raft log disk config is set to 1 by default
- int numContainers = (int)Math.ceil((double)
- (numContainerPerOwnerInPipeline *
- numContainerPerOwnerInPipeline) / numMetaDataVolumes);
- assertEquals(numContainers, pipelineManager.
- getNumberOfContainers(pipeline.getId()));
- assertEquals(numContainers, allocatedBlockMap.size());
- assertEquals(numContainers, allocatedBlockMap.values().size());
- } catch (Exception e) {
- fail("testAllocateBlockInParallel failed");
- }
+ CompletableFuture
+ .allOf(futureList.toArray(
+ new CompletableFuture[futureList.size()])).get();
+ assertEquals(1,
+ pipelineManager.getPipelines(replicationConfig).size());
+ Pipeline pipeline =
+ pipelineManager.getPipelines(replicationConfig).get(0);
+ // the pipeline per raft log disk config is set to 1 by default
+ int numContainers = (int)Math.ceil((double)
+ (numContainerPerOwnerInPipeline *
+ numContainerPerOwnerInPipeline) / numMetaDataVolumes);
+ assertEquals(numContainers,
pipelineManager.getNumberOfContainers(pipeline.getId()));
+ assertEquals(numContainers, allocatedBlockMap.size());
+ assertEquals(numContainers, allocatedBlockMap.values().size());
}
@Test
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
index 642fbd635a..757a0ab0dc 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMContext.java
@@ -19,20 +19,18 @@
package org.apache.hadoop.hdds.scm.ha;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
-import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for SCMContext.
*/
public class TestSCMContext {
@Test
- public void testRaftOperations() {
+ void testRaftOperations() throws Exception {
// start as follower
SCMContext scmContext = new SCMContext.Builder()
.setLeader(false).setTerm(0).buildMaybeInvalid();
@@ -44,11 +42,8 @@ public class TestSCMContext {
scmContext.setLeaderReady();
assertTrue(scmContext.isLeader());
assertTrue(scmContext.isLeaderReady());
- try {
- assertEquals(scmContext.getTermOfLeader(), 10);
- } catch (NotLeaderException e) {
- fail("Should not throw nle.");
- }
+ assertEquals(scmContext.getTermOfLeader(), 10);
+
// step down
scmContext.updateLeaderAndTerm(false, 0);
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 742ea35977..cc9133cf68 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -26,7 +26,6 @@ import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.UUID;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -773,8 +772,7 @@ public class TestSCMNodeManager {
* @throws AuthenticationException
*/
@Test
- public void testScmHandleJvmPause()
- throws IOException, InterruptedException, AuthenticationException {
+ void testScmHandleJvmPause() throws Exception {
final int healthCheckInterval = 200; // milliseconds
final int heartbeatInterval = 1; // seconds
final int staleNodeInterval = 3; // seconds
@@ -830,14 +828,11 @@ public class TestSCMNodeManager {
schedFuture = nodeManager.unpauseHealthCheck();
// Step 3 : wait for 1 iteration of health check
- try {
- schedFuture.get();
- assertThat(nodeManager.getSkippedHealthChecks())
- .withFailMessage("We did not skip any heartbeat checks")
- .isGreaterThan(0);
- } catch (ExecutionException e) {
- fail("Unexpected exception waiting for Scheduled Health Check");
- }
+
+ schedFuture.get();
+ assertThat(nodeManager.getSkippedHealthChecks())
+ .withFailMessage("We did not skip any heartbeat checks")
+ .isGreaterThan(0);
// Step 4 : all nodes should still be HEALTHY
assertEquals(2, nodeManager.getAllNodes().size());
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
index 28520c0578..b82ce15a38 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
@@ -66,7 +66,6 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOK
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_ROOTCA_CERTIFICATE_POLLING_INTERVAL;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -148,7 +147,7 @@ public class TestRootCARotationManager {
}
@Test
- public void testProperties() {
+ void testProperties() throws Exception {
// invalid check interval
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28");
assertThrows(DateTimeParseException.class, () -> rootCARotationManager =
new RootCARotationManager(scm));
@@ -169,20 +168,13 @@ public class TestRootCARotationManager {
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P1D");
ozoneConfig.set(HDDS_X509_CA_ROTATION_TIME_OF_DAY, "01:00:00");
- try {
- rootCARotationManager = new RootCARotationManager(scm);
- } catch (Exception e) {
- fail("Should succeed");
- }
+ rootCARotationManager = new RootCARotationManager(scm);
// invalid property value is ignored when auto rotation is disabled.
ozoneConfig.setBoolean(HDDS_X509_CA_ROTATION_ENABLED, false);
ozoneConfig.set(HDDS_X509_CA_ROTATION_CHECK_INTERNAL, "P28D");
- try {
- rootCARotationManager = new RootCARotationManager(scm);
- } catch (Exception e) {
- fail("Should succeed");
- }
+
+ rootCARotationManager = new RootCARotationManager(scm);
}
@Test
diff --git
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
index c3e44c7d6a..25a3ad2d9c 100644
---
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
+++
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
@@ -66,7 +66,6 @@ import java.util.stream.IntStream;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
@@ -1030,7 +1029,7 @@ public class TestOzoneECClient {
}
@Test
- public void testDiscardPreAllocatedBlocksPreventRetryExceeds()
+ void testDiscardPreAllocatedBlocksPreventRetryExceeds()
throws Exception {
close();
OzoneConfiguration con = createConfiguration();
@@ -1100,16 +1099,10 @@ public class TestOzoneECClient {
factoryStub.setFailedStorages(failedDNs);
// Writes that will retry due to failed DNs
- try {
- for (int j = 0; j < numStripesAfterFailure; j++) {
- for (int i = 0; i < dataBlocks; i++) {
- out.write(inputChunks[i]);
- }
+ for (int j = 0; j < numStripesAfterFailure; j++) {
+ for (int i = 0; i < dataBlocks; i++) {
+ out.write(inputChunks[i]);
}
- } catch (IOException e) {
- // If we don't discard pre-allocated blocks,
- // retries should exceed the maxRetries and write will fail.
- fail("Max retries exceeded");
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index 6200b1191c..858a448675 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -62,7 +62,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
@@ -212,7 +211,7 @@ public class TestPipelineClose {
}
@Test
- public void testPipelineCloseWithLogFailure()
+ void testPipelineCloseWithLogFailure()
throws IOException, TimeoutException {
EventQueue eventQ = (EventQueue) scm.getEventQueue();
PipelineActionHandler pipelineActionTest =
@@ -230,11 +229,7 @@ public class TestPipelineClose {
Pipeline openPipeline = containerWithPipeline.getPipeline();
RaftGroupId groupId = RaftGroupId.valueOf(openPipeline.getId().getId());
- try {
- pipelineManager.getPipeline(openPipeline.getId());
- } catch (PipelineNotFoundException e) {
- fail("pipeline should exist");
- }
+ pipelineManager.getPipeline(openPipeline.getId());
DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0);
int index = cluster.getHddsDatanodeIndex(datanodeDetails);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index d09e924ca8..40a8010393 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -86,7 +86,7 @@ public class TestSCMSafeModeWithPipelineRules {
@Test
- public void testScmSafeMode() throws Exception {
+ void testScmSafeMode() throws Exception {
int datanodeCount = 6;
setup(datanodeCount);
waitForRatis3NodePipelines(datanodeCount / 3);
@@ -136,11 +136,7 @@ public class TestSCMSafeModeWithPipelineRules {
DatanodeDetails restartedDatanode = pipelineList.get(1).getFirstNode();
// Now restart one datanode from the 2nd pipeline
- try {
- cluster.restartHddsDatanode(restartedDatanode, false);
- } catch (Exception ex) {
- fail("Datanode restart failed");
- }
+ cluster.restartHddsDatanode(restartedDatanode, false);
GenericTestUtils.waitFor(() ->
scmSafeModeManager.getOneReplicaPipelineSafeModeRule()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
index 37d0703e9f..c727ecd0a9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
@@ -41,11 +41,9 @@ import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
-import java.io.IOException;
import java.util.concurrent.TimeUnit;
import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests the idempotent operations in ContainerStateMachine.
@@ -80,43 +78,39 @@ public class TestContainerStateMachineIdempotency {
}
@Test
- public void testContainerStateMachineIdempotency() throws Exception {
+ void testContainerStateMachineIdempotency() throws Exception {
ContainerWithPipeline container = storageContainerLocationClient
.allocateContainer(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
long containerID = container.getContainerInfo().getContainerID();
Pipeline pipeline = container.getPipeline();
XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline);
- try {
- //create the container
- ContainerProtocolCalls.createContainer(client, containerID, null);
- // call create Container again
- BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
- byte[] data =
- RandomStringUtils.random(RandomUtils.nextInt(0, 1024))
- .getBytes(UTF_8);
- ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
- ContainerTestHelper
- .getWriteChunkRequest(container.getPipeline(), blockID,
- data.length);
- client.sendCommand(writeChunkRequest);
+ //create the container
+ ContainerProtocolCalls.createContainer(client, containerID, null);
+ // call create Container again
+ BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
+ byte[] data =
+ RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8);
+ ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+ ContainerTestHelper
+ .getWriteChunkRequest(container.getPipeline(), blockID,
+ data.length);
+ client.sendCommand(writeChunkRequest);
- //Make the write chunk request again without requesting for overWrite
- client.sendCommand(writeChunkRequest);
- // Now, explicitly make a putKey request for the block.
- ContainerProtos.ContainerCommandRequestProto putKeyRequest =
- ContainerTestHelper
- .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
- client.sendCommand(putKeyRequest).getPutBlock();
- // send the putBlock again
- client.sendCommand(putKeyRequest);
+ //Make the write chunk request again without requesting for overWrite
+ client.sendCommand(writeChunkRequest);
+ // Now, explicitly make a putKey request for the block.
+ ContainerProtos.ContainerCommandRequestProto putKeyRequest =
+ ContainerTestHelper
+ .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
+ client.sendCommand(putKeyRequest).getPutBlock();
+ // send the putBlock again
+ client.sendCommand(putKeyRequest);
+
+ // close container call
+ ContainerProtocolCalls.closeContainer(client, containerID, null);
+ ContainerProtocolCalls.closeContainer(client, containerID, null);
- // close container call
- ContainerProtocolCalls.closeContainer(client, containerID, null);
- ContainerProtocolCalls.closeContainer(client, containerID, null);
- } catch (IOException ioe) {
- fail("Container operation failed" + ioe);
- }
xceiverClientManager.releaseClient(client, false);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 2eb2364152..1be5b64ac8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -624,15 +624,11 @@ final class TestSecureOzoneCluster {
new OzoneManagerProtocolClientSideTranslatorPB(
OmTransportFactory.create(conf, ugi, null),
ClientId.randomId().toString());
- try {
- secureClient.createVolume(
- new OmVolumeArgs.Builder().setVolume("vol1")
- .setOwnerName("owner1")
- .setAdminName("admin")
- .build());
- } catch (IOException ex) {
- fail("Secure client should be able to create volume.");
- }
+ secureClient.createVolume(
+ new OmVolumeArgs.Builder().setVolume("vol1")
+ .setOwnerName("owner1")
+ .setAdminName("admin")
+ .build());
ugi = UserGroupInformation.createUserForTesting(
"testuser1", new String[] {"test"});
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 01ef2cf1be..2c11177e5e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -492,7 +492,7 @@ public class TestContainerStateMachineFailures {
@Test
@Flaky("HDDS-6115")
- public void testApplyTransactionIdempotencyWithClosedContainer()
+ void testApplyTransactionIdempotencyWithClosedContainer()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -541,11 +541,7 @@ public class TestContainerStateMachineFailures {
request.setContainerID(containerID);
request.setCloseContainer(
ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
- try {
- xceiverClient.sendCommand(request.build());
- } catch (IOException e) {
- fail("Exception should not be thrown");
- }
+ xceiverClient.sendCommand(request.build());
assertSame(
TestHelper.getDatanodeService(omKeyLocationInfo, cluster)
.getDatanodeStateMachine()
@@ -555,8 +551,6 @@ public class TestContainerStateMachineFailures {
assertTrue(stateMachine.isStateMachineHealthy());
try {
stateMachine.takeSnapshot();
- } catch (IOException ioe) {
- fail("Exception should not be thrown");
} finally {
xceiverClientManager.releaseClient(xceiverClient, false);
}
@@ -583,7 +577,7 @@ public class TestContainerStateMachineFailures {
// not be marked unhealthy and pipeline should not fail if container gets
// closed here.
@Test
- public void testWriteStateMachineDataIdempotencyWithClosedContainer()
+ void testWriteStateMachineDataIdempotencyWithClosedContainer()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -697,11 +691,7 @@ public class TestContainerStateMachineFailures {
.getContainerState(),
ContainerProtos.ContainerDataProto.State.CLOSED);
assertTrue(stateMachine.isStateMachineHealthy());
- try {
- stateMachine.takeSnapshot();
- } catch (IOException ioe) {
- fail("Exception should not be thrown");
- }
+ stateMachine.takeSnapshot();
final FileInfo latestSnapshot = getSnapshotFileInfo(storage);
assertNotEquals(snapshot.getPath(), latestSnapshot.getPath());
@@ -713,43 +703,37 @@ public class TestContainerStateMachineFailures {
}
@Test
- public void testContainerStateMachineSingleFailureRetry()
+ void testContainerStateMachineSingleFailureRetry()
throws Exception {
- OzoneOutputStream key =
- objectStore.getVolume(volumeName).getBucket(bucketName)
- .createKey("ratis1", 1024,
- ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
- ReplicationFactor.THREE), new HashMap<>());
+ try (OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
+ .createKey("ratis1", 1024,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS,
+ ReplicationFactor.THREE), new HashMap<>())) {
- key.write("ratis".getBytes(UTF_8));
- key.flush();
- key.write("ratis".getBytes(UTF_8));
- key.write("ratis".getBytes(UTF_8));
-
- KeyOutputStream groupOutputStream = (KeyOutputStream) key.
- getOutputStream();
- List<OmKeyLocationInfo> locationInfoList =
- groupOutputStream.getLocationInfoList();
- assertEquals(1, locationInfoList.size());
+ key.write("ratis".getBytes(UTF_8));
+ key.flush();
+ key.write("ratis".getBytes(UTF_8));
+ key.write("ratis".getBytes(UTF_8));
- OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
+ KeyOutputStream groupOutputStream = (KeyOutputStream) key.
+ getOutputStream();
+ List<OmKeyLocationInfo> locationInfoList =
+ groupOutputStream.getLocationInfoList();
+ assertEquals(1, locationInfoList.size());
- induceFollowerFailure(omKeyLocationInfo, 2);
+ OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
- try {
+ induceFollowerFailure(omKeyLocationInfo, 2);
key.flush();
key.write("ratis".getBytes(UTF_8));
key.flush();
- key.close();
- } catch (Exception ioe) {
- // Should not fail..
- fail("Exception " + ioe.getMessage());
}
+
validateData("ratis1", 2, "ratisratisratisratis");
}
@Test
- public void testContainerStateMachineDualFailureRetry()
+ void testContainerStateMachineDualFailureRetry()
throws Exception {
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -772,15 +756,10 @@ public class TestContainerStateMachineFailures {
induceFollowerFailure(omKeyLocationInfo, 1);
- try {
- key.flush();
- key.write("ratis".getBytes(UTF_8));
- key.flush();
- key.close();
- } catch (Exception ioe) {
- // Should not fail..
- fail("Exception " + ioe.getMessage());
- }
+ key.flush();
+ key.write("ratis".getBytes(UTF_8));
+ key.flush();
+ key.close();
validateData("ratis1", 2, "ratisratisratisratis");
}
@@ -817,31 +796,24 @@ public class TestContainerStateMachineFailures {
}
}
- private void validateData(String key, int locationCount, String payload) {
+ private void validateData(String key, int locationCount, String payload)
throws Exception {
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(key)
.build();
- OmKeyInfo keyInfo = null;
- try {
- keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs);
-
- assertEquals(locationCount,
- keyInfo.getLatestVersionLocations().getLocationListCount());
- byte[] buffer = new byte[1024];
- try (OzoneInputStream o = objectStore.getVolume(volumeName)
- .getBucket(bucketName).readKey(key)) {
- o.read(buffer, 0, 1024);
- }
- int end = ArrayUtils.indexOf(buffer, (byte) 0);
- String response = new String(buffer, 0,
- end,
- StandardCharsets.UTF_8);
- assertEquals(payload, response);
- } catch (IOException e) {
- fail("Exception not expected " + e.getMessage());
+ OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs);
+
+ assertEquals(locationCount,
+ keyInfo.getLatestVersionLocations().getLocationListCount());
+ byte[] buffer = new byte[1024];
+ try (OzoneInputStream o = objectStore.getVolume(volumeName)
+ .getBucket(bucketName).readKey(key)) {
+ o.read(buffer, 0, 1024);
}
+ int end = ArrayUtils.indexOf(buffer, (byte) 0);
+ String response = new String(buffer, 0, end, StandardCharsets.UTF_8);
+ assertEquals(payload, response);
}
static FileInfo getSnapshotFileInfo(SimpleStateMachineStorage storage)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index ba975e6b21..fa50dac64f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -74,7 +74,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeAll;
@@ -201,7 +200,7 @@ public class TestDeleteWithInAdequateDN {
* data is not deleted from any of the nodes which have the closed replica.
*/
@Test
- public void testDeleteKeyWithInAdequateDN() throws Exception {
+ void testDeleteKeyWithInAdequateDN() throws Exception {
String keyName = "ratis";
OzoneOutputStream key =
objectStore.getVolume(volumeName).getBucket(bucketName)
@@ -289,14 +288,11 @@ public class TestDeleteWithInAdequateDN {
deleteKey("ratis");
// make sure the chunk was never deleted on the leader even though
// deleteBlock handler is invoked
- try {
- for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
- keyValueHandler.getChunkManager()
- .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo),
- null);
- }
- } catch (IOException ioe) {
- fail("Exception should not be thrown.");
+
+ for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
+ keyValueHandler.getChunkManager()
+ .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo),
+ null);
}
long numReadStateMachineOps =
stateMachine.getMetrics().getNumReadStateMachineOps();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 2c83cf7854..72b6880c17 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -54,7 +54,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assumptions;
@@ -189,67 +188,62 @@ public class TestOzoneClientRetriesOnExceptions {
}
@Test
- public void testMaxRetriesByOzoneClient() throws Exception {
+ void testMaxRetriesByOzoneClient() throws Exception {
String keyName = getKeyName();
- OzoneOutputStream key = createKey(
- keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
- KeyOutputStream keyOutputStream =
- assertInstanceOf(KeyOutputStream.class, key.getOutputStream());
- List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
- assertEquals((MAX_RETRIES + 1),
- keyOutputStream.getStreamEntries().size());
- int dataLength = maxFlushSize + 50;
- // write data more than 1 chunk
- byte[] data1 =
- ContainerTestHelper.getFixedLengthString(keyString, dataLength)
- .getBytes(UTF_8);
- long containerID;
- List<Long> containerList = new ArrayList<>();
- for (BlockOutputStreamEntry entry : entries) {
- containerID = entry.getBlockID().getContainerID();
- ContainerInfo container =
- cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueOf(containerID));
- Pipeline pipeline =
- cluster.getStorageContainerManager().getPipelineManager()
- .getPipeline(container.getPipelineID());
- XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(pipeline);
- Assumptions.assumeFalse(containerList.contains(containerID));
- containerList.add(containerID);
- xceiverClient.sendCommand(ContainerTestHelper
- .getCreateContainerRequest(containerID, pipeline));
- xceiverClientManager.releaseClient(xceiverClient, false);
- }
- key.write(data1);
- OutputStream stream = entries.get(0).getOutputStream();
- BlockOutputStream blockOutputStream =
- assertInstanceOf(BlockOutputStream.class, stream);
- TestHelper.waitForContainerClose(key, cluster);
- // Ensure that blocks for the key have been allocated to at least N+1
- // containers so that write request will be tried on N+1 different blocks
- // of N+1 different containers and it will finally fail as it will hit
- // the max retry count of N.
- Assumptions.assumeTrue(containerList.size() > MAX_RETRIES,
- containerList.size() + " <= " + MAX_RETRIES);
- IOException ioe = assertThrows(IOException.class, () -> {
+ try (OzoneOutputStream key = createKey(
+ keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize)) {
+ KeyOutputStream keyOutputStream =
+ assertInstanceOf(KeyOutputStream.class, key.getOutputStream());
+ List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
+ assertEquals((MAX_RETRIES + 1),
+ keyOutputStream.getStreamEntries().size());
+ int dataLength = maxFlushSize + 50;
+ // write data more than 1 chunk
+ byte[] data1 =
+ ContainerTestHelper.getFixedLengthString(keyString, dataLength)
+ .getBytes(UTF_8);
+ long containerID;
+ List<Long> containerList = new ArrayList<>();
+ for (BlockOutputStreamEntry entry : entries) {
+ containerID = entry.getBlockID().getContainerID();
+ ContainerInfo container =
+ cluster.getStorageContainerManager().getContainerManager()
+ .getContainer(ContainerID.valueOf(containerID));
+ Pipeline pipeline =
+ cluster.getStorageContainerManager().getPipelineManager()
+ .getPipeline(container.getPipelineID());
+ XceiverClientSpi xceiverClient =
+ xceiverClientManager.acquireClient(pipeline);
+ Assumptions.assumeFalse(containerList.contains(containerID));
+ containerList.add(containerID);
+ xceiverClient.sendCommand(ContainerTestHelper
+ .getCreateContainerRequest(containerID, pipeline));
+ xceiverClientManager.releaseClient(xceiverClient, false);
+ }
key.write(data1);
- // ensure that write is flushed to dn
- key.flush();
- });
- assertInstanceOf(ContainerNotOpenException.class,
- HddsClientUtils.checkForException(blockOutputStream.getIoException()));
- assertThat(ioe.getMessage()).contains(
- "Retry request failed. " +
- "retries get failed due to exceeded maximum " +
- "allowed retries number: " + MAX_RETRIES);
-
- ioe = assertThrows(IOException.class, () -> key.flush());
- assertThat(ioe.getMessage()).contains("Stream is closed");
+ OutputStream stream = entries.get(0).getOutputStream();
+ BlockOutputStream blockOutputStream =
+ assertInstanceOf(BlockOutputStream.class, stream);
+ TestHelper.waitForContainerClose(key, cluster);
+ // Ensure that blocks for the key have been allocated to at least N+1
+ // containers so that write request will be tried on N+1 different blocks
+ // of N+1 different containers and it will finally fail as it will hit
+ // the max retry count of N.
+ Assumptions.assumeTrue(containerList.size() > MAX_RETRIES,
+ containerList.size() + " <= " + MAX_RETRIES);
+ IOException ioe = assertThrows(IOException.class, () -> {
+ key.write(data1);
+ // ensure that write is flushed to dn
+ key.flush();
+ });
+ assertInstanceOf(ContainerNotOpenException.class,
+ HddsClientUtils.checkForException(blockOutputStream.getIoException()));
+ assertThat(ioe.getMessage()).contains(
+ "Retry request failed. " +
+ "retries get failed due to exceeded maximum " +
+ "allowed retries number: " + MAX_RETRIES);
- try {
- key.close();
- } catch (IOException e) {
- fail("Expected should not be thrown");
+ ioe = assertThrows(IOException.class, () -> key.flush());
+ assertThat(ioe.getMessage()).contains("Stream is closed");
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 7682138bbb..2adb310552 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -84,7 +84,6 @@ import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -1901,7 +1900,7 @@ public abstract class TestOzoneRpcClientAbstract {
// Make this executed at last, for it has some side effect to other UTs
@Test
@Flaky("HDDS-6151")
- public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
+ void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@@ -1984,16 +1983,12 @@ public abstract class TestOzoneRpcClientAbstract {
}, 1000, 10000);
// Try reading keyName2
- try {
- GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG);
- try (OzoneInputStream is = bucket.readKey(keyName2)) {
- byte[] content = new byte[100];
- is.read(content);
- String retValue = new String(content, UTF_8);
- assertEquals(value, retValue.trim());
- }
- } catch (IOException e) {
- fail("Reading unhealthy replica should succeed.");
+ GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG);
+ try (OzoneInputStream is = bucket.readKey(keyName2)) {
+ byte[] content = new byte[100];
+ is.read(content);
+ String retValue = new String(content, UTF_8);
+ assertEquals(value, retValue.trim());
}
}
@@ -2002,7 +1997,7 @@ public abstract class TestOzoneRpcClientAbstract {
* @throws IOException
*/
@Test
- public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
+ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();
@@ -2051,8 +2046,6 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] b = new byte[data.length];
is.read(b);
assertArrayEquals(b, data);
- } catch (OzoneChecksumException e) {
- fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(1), key);
// Try reading the key. Read will fail on the first node and will eventually
@@ -2061,8 +2054,6 @@ public abstract class TestOzoneRpcClientAbstract {
byte[] b = new byte[data.length];
is.read(b);
assertArrayEquals(b, data);
- } catch (OzoneChecksumException e) {
- fail("Reading corrupted data should not fail.");
}
corruptData(containerList.get(2), key);
// Try reading the key. Read will fail here as all the replicas are corrupt
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
index 253193c92e..ffd80f359f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.common.OzoneChecksumException;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
@@ -135,8 +134,6 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read key should succeed");
}
// read file with topology aware read enabled
@@ -144,8 +141,6 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read file should succeed");
}
// read key with topology aware read disabled
@@ -159,8 +154,6 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read key should succeed");
}
// read file with topology aware read disabled
@@ -168,8 +161,6 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
byte[] b = new byte[value.getBytes(UTF_8).length];
is.read(b);
assertArrayEquals(b, value.getBytes(UTF_8));
- } catch (OzoneChecksumException e) {
- fail("Read file should succeed");
}
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 7e55ea9cc4..7a64ddc5d5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -48,7 +48,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
@@ -91,7 +90,7 @@ public class TestOzoneContainer {
}
@Test
- public void testOzoneContainerStart(
+ void testOzoneContainerStart(
@TempDir File ozoneMetaDir, @TempDir File hddsNodeDir) throws Exception {
OzoneConfiguration conf = newOzoneConfiguration();
OzoneContainer container = null;
@@ -110,18 +109,12 @@ public class TestOzoneContainer {
String clusterId = UUID.randomUUID().toString();
container.start(clusterId);
- try {
- container.start(clusterId);
- } catch (Exception e) {
- fail();
- }
+
+ container.start(clusterId);
+
+ container.stop();
container.stop();
- try {
- container.stop();
- } catch (Exception e) {
- fail();
- }
} finally {
if (container != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java
index ae6fac2dee..055ddeb20c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java
@@ -578,7 +578,7 @@ public abstract class TestOmSnapshotFileSystem {
}
@Test
- public void testReadFileFromSnapshot() throws Exception {
+ void testReadFileFromSnapshot() throws Exception {
String keyName = "dir/file";
byte[] strBytes = "Sample text".getBytes(StandardCharsets.UTF_8);
Path parent = new Path("/");
@@ -608,8 +608,6 @@ public abstract class TestOmSnapshotFileSystem {
byte[] readBytes = new byte[strBytes.length];
System.arraycopy(buffer.array(), 0, readBytes, 0, strBytes.length);
assertArrayEquals(strBytes, readBytes);
- } catch (Exception e) {
- fail("Failed to read file, Exception : " + e);
}
deleteSnapshot(snapshotName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
index e734c77cfb..14b1a30b44 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
@@ -126,7 +126,7 @@ public class TestScmSafeMode {
}
@Test
- public void testSafeModeOperations() throws Exception {
+ void testSafeModeOperations() throws Exception {
// Create {numKeys} random names keys.
TestStorageContainerManagerHelper helper =
new TestStorageContainerManagerHelper(cluster, conf);
@@ -148,12 +148,7 @@ public class TestScmSafeMode {
cluster.stop();
- try {
- cluster = builder.build();
- } catch (IOException e) {
- fail("failed");
- }
-
+ cluster = builder.build();
StorageContainerManager scm;
@@ -179,17 +174,13 @@ public class TestScmSafeMode {
* Tests inSafeMode & forceExitSafeMode api calls.
*/
@Test
- public void testIsScmInSafeModeAndForceExit() throws Exception {
+ void testIsScmInSafeModeAndForceExit() throws Exception {
// Test 1: SCM should be out of safe mode.
assertFalse(storageContainerLocationClient.inSafeMode());
cluster.stop();
// Restart the cluster with same metadata dir.
- try {
- cluster = builder.build();
- } catch (IOException e) {
- fail("Cluster startup failed.");
- }
+ cluster = builder.build();
// Test 2: Scm should be in safe mode as datanodes are not started yet.
storageContainerLocationClient = cluster
@@ -212,15 +203,12 @@ public class TestScmSafeMode {
}
@Test
- public void testSCMSafeMode() throws Exception {
+ void testSCMSafeMode() throws Exception {
// Test1: Test safe mode when there are no containers in system.
cluster.stop();
- try {
- cluster = builder.build();
- } catch (IOException e) {
- fail("Cluster startup failed.");
- }
+ cluster = builder.build();
+
assertTrue(cluster.getStorageContainerManager().isInSafeMode());
cluster.startHddsDatanodes();
cluster.waitForClusterToBeReady();
@@ -259,11 +247,7 @@ public class TestScmSafeMode {
.captureLogs(SCMSafeModeManager.getLogger());
logCapturer.clearOutput();
- try {
- cluster = builder.build();
- } catch (IOException ex) {
- fail("failed");
- }
+ cluster = builder.build();
StorageContainerManager scm;
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index ec61970ee2..abc21ed435 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -80,7 +80,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
@@ -279,7 +278,7 @@ public class TestRangerBGSyncService {
rolesCreated.add(0, role.getName());
}
- private void createRolesAndPoliciesInRanger(boolean populateDB) {
+ private void createRolesAndPoliciesInRanger(boolean populateDB) throws
IOException {
policiesCreated.clear();
rolesCreated.clear();
@@ -301,102 +300,75 @@ public class TestRangerBGSyncService {
// Add tenant entry in OM DB
if (populateDB) {
LOG.info("Creating OM DB tenant entries");
- try {
- // Tenant State entry
- omMetadataManager.getTenantStateTable().put(tenantId,
- new OmDBTenantState(
- tenantId, volumeName, userRoleName, adminRoleName,
- bucketNamespacePolicyName, bucketPolicyName));
- // Access ID entry for alice
- final String aliceAccessId = OMMultiTenantManager.getDefaultAccessId(
- tenantId, USER_ALICE_SHORT);
- omMetadataManager.getTenantAccessIdTable().put(aliceAccessId,
- new OmDBAccessIdInfo.Builder()
- .setTenantId(tenantId)
- .setUserPrincipal(USER_ALICE_SHORT)
- .setIsAdmin(false)
- .setIsDelegatedAdmin(false)
- .build());
- // Access ID entry for bob
- final String bobAccessId = OMMultiTenantManager.getDefaultAccessId(
- tenantId, USER_BOB_SHORT);
- omMetadataManager.getTenantAccessIdTable().put(bobAccessId,
- new OmDBAccessIdInfo.Builder()
- .setTenantId(tenantId)
- .setUserPrincipal(USER_BOB_SHORT)
- .setIsAdmin(false)
- .setIsDelegatedAdmin(false)
- .build());
- } catch (IOException e) {
- fail(e.getMessage());
- }
+ // Tenant State entry
+ omMetadataManager.getTenantStateTable().put(tenantId,
+ new OmDBTenantState(
+ tenantId, volumeName, userRoleName, adminRoleName,
+ bucketNamespacePolicyName, bucketPolicyName));
+ // Access ID entry for alice
+ final String aliceAccessId = OMMultiTenantManager.getDefaultAccessId(
+ tenantId, USER_ALICE_SHORT);
+ omMetadataManager.getTenantAccessIdTable().put(aliceAccessId,
+ new OmDBAccessIdInfo.Builder()
+ .setTenantId(tenantId)
+ .setUserPrincipal(USER_ALICE_SHORT)
+ .setIsAdmin(false)
+ .setIsDelegatedAdmin(false)
+ .build());
+ // Access ID entry for bob
+ final String bobAccessId = OMMultiTenantManager.getDefaultAccessId(
+ tenantId, USER_BOB_SHORT);
+ omMetadataManager.getTenantAccessIdTable().put(bobAccessId,
+ new OmDBAccessIdInfo.Builder()
+ .setTenantId(tenantId)
+ .setUserPrincipal(USER_BOB_SHORT)
+ .setIsAdmin(false)
+ .setIsDelegatedAdmin(false)
+ .build());
}
- try {
- LOG.info("Creating user in Ranger: {}", USER_ALICE_SHORT);
- rangerUserRequest.createUser(USER_ALICE_SHORT, "Password12");
- usersCreated.add(USER_ALICE_SHORT);
- } catch (IOException e) {
- fail(e.getMessage());
- }
- try {
- LOG.info("Creating user in Ranger: {}", USER_BOB_SHORT);
- rangerUserRequest.createUser(USER_BOB_SHORT, "Password12");
- usersCreated.add(USER_BOB_SHORT);
- } catch (IOException e) {
- fail(e.getMessage());
- }
+ LOG.info("Creating user in Ranger: {}", USER_ALICE_SHORT);
+ rangerUserRequest.createUser(USER_ALICE_SHORT, "Password12");
+ usersCreated.add(USER_ALICE_SHORT);
- try {
- LOG.info("Creating admin role in Ranger: {}", adminRoleName);
- // Create empty admin role first
- Role adminRole = new Role.Builder()
- .setName(adminRoleName)
- .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION)
- .build();
- createRoleHelper(adminRole);
- } catch (IOException e) {
- fail(e.getMessage());
- }
-
- try {
- LOG.info("Creating user role in Ranger: {}", userRoleName);
- Role userRole = new Role.Builder()
- .setName(userRoleName)
- .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION)
- .addRole(adminRoleName, true)
- // Add alice and bob to the user role
- .addUsers(Arrays.asList(USER_ALICE_SHORT, USER_BOB_SHORT))
- .build();
- createRoleHelper(userRole);
- } catch (IOException e) {
- fail(e.getMessage());
- }
-
- try {
- Policy tenant1VolumeAccessPolicy =
- OMMultiTenantManager.getDefaultVolumeAccessPolicy(
- tenantId, volumeName, userRoleName, adminRoleName);
- LOG.info("Creating VolumeAccess policy in Ranger: {}",
- tenant1VolumeAccessPolicy.getName());
- accessController.createPolicy(tenant1VolumeAccessPolicy);
- policiesCreated.add(tenant1VolumeAccessPolicy.getName());
- } catch (IOException e) {
- fail(e.getMessage());
- }
+ LOG.info("Creating user in Ranger: {}", USER_BOB_SHORT);
+ rangerUserRequest.createUser(USER_BOB_SHORT, "Password12");
+ usersCreated.add(USER_BOB_SHORT);
- try {
- Policy tenant1BucketCreatePolicy =
- OMMultiTenantManager.getDefaultBucketAccessPolicy(
- tenantId, volumeName, userRoleName);
- LOG.info("Creating BucketAccess policy in Ranger: {}",
- tenant1BucketCreatePolicy.getName());
- accessController.createPolicy(tenant1BucketCreatePolicy);
- policiesCreated.add(tenant1BucketCreatePolicy.getName());
- } catch (IOException e) {
- fail(e.getMessage());
- }
+ LOG.info("Creating admin role in Ranger: {}", adminRoleName);
+ // Create empty admin role first
+ Role adminRole = new Role.Builder()
+ .setName(adminRoleName)
+ .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION)
+ .build();
+ createRoleHelper(adminRole);
+
+ LOG.info("Creating user role in Ranger: {}", userRoleName);
+ Role userRole = new Role.Builder()
+ .setName(userRoleName)
+ .setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION)
+ .addRole(adminRoleName, true)
+ // Add alice and bob to the user role
+ .addUsers(Arrays.asList(USER_ALICE_SHORT, USER_BOB_SHORT))
+ .build();
+ createRoleHelper(userRole);
+
+ Policy tenant1VolumeAccessPolicy =
+ OMMultiTenantManager.getDefaultVolumeAccessPolicy(
+ tenantId, volumeName, userRoleName, adminRoleName);
+ LOG.info("Creating VolumeAccess policy in Ranger: {}",
+ tenant1VolumeAccessPolicy.getName());
+ accessController.createPolicy(tenant1VolumeAccessPolicy);
+ policiesCreated.add(tenant1VolumeAccessPolicy.getName());
+
+ Policy tenant1BucketCreatePolicy =
+ OMMultiTenantManager.getDefaultBucketAccessPolicy(
+ tenantId, volumeName, userRoleName);
+ LOG.info("Creating BucketAccess policy in Ranger: {}",
+ tenant1BucketCreatePolicy.getName());
+ accessController.createPolicy(tenant1BucketCreatePolicy);
+ policiesCreated.add(tenant1BucketCreatePolicy.getName());
}
public void cleanupPolicies() {
@@ -507,7 +479,7 @@ public class TestRangerBGSyncService {
* Expect sync service to check Ranger state but write nothing to Ranger.
*/
@Test
- public void testConsistentState() throws Exception {
+ void testConsistentState() throws Exception {
long startingRangerVersion = initBGSync();
// Create roles and policies in ranger that are
@@ -535,23 +507,13 @@ public class TestRangerBGSyncService {
}
for (String policyName : policiesCreated) {
- try {
- final Policy policyRead = accessController.getPolicy(policyName);
- assertEquals(policyName, policyRead.getName());
- } catch (Exception e) {
- e.printStackTrace();
- fail(e.getMessage());
- }
+ final Policy policyRead = accessController.getPolicy(policyName);
+ assertEquals(policyName, policyRead.getName());
}
for (String roleName : rolesCreated) {
- try {
- final Role roleResponse = accessController.getRole(roleName);
- assertEquals(roleName, roleResponse.getName());
- } catch (Exception e) {
- e.printStackTrace();
- fail(e.getMessage());
- }
+ final Role roleResponse = accessController.getRole(roleName);
+ assertEquals(roleName, roleResponse.getName());
}
}
@@ -627,7 +589,7 @@ public class TestRangerBGSyncService {
* Expect sync service to recover both policies to their default states.
*/
@Test
- public void testRecreateDeletedRangerPolicy() throws Exception {
+ void testRecreateDeletedRangerPolicy() throws Exception {
long startingRangerVersion = initBGSync();
// Create roles and policies in ranger that are
@@ -662,23 +624,13 @@ public class TestRangerBGSyncService {
assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore);
for (String policyName : policiesCreated) {
- try {
- final Policy policyRead = accessController.getPolicy(policyName);
- assertEquals(policyName, policyRead.getName());
- } catch (Exception e) {
- e.printStackTrace();
- fail(e.getMessage());
- }
+ final Policy policyRead = accessController.getPolicy(policyName);
+ assertEquals(policyName, policyRead.getName());
}
for (String roleName : rolesCreated) {
- try {
- final Role roleRead = accessController.getRole(roleName);
- assertEquals(roleName, roleRead.getName());
- } catch (Exception e) {
- e.printStackTrace();
- fail(e.getMessage());
- }
+ final Role roleRead = accessController.getRole(roleName);
+ assertEquals(roleName, roleRead.getName());
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index 37c788a405..029b0813bb 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -272,7 +272,7 @@ public abstract class TestOmSnapshot {
* Trigger OM upgrade finalization from the client and block until completion
* (status FINALIZATION_DONE).
*/
- private void finalizeOMUpgrade() throws IOException {
+ private void finalizeOMUpgrade() throws Exception {
// Trigger OM upgrade finalization. Ref: FinalizeUpgradeSubCommand#call
final OzoneManagerProtocol omClient = client.getObjectStore()
.getClientProxy().getOzoneManagerClient();
@@ -284,17 +284,12 @@ public abstract class TestOmSnapshot {
assertTrue(isStarting(finalizationResponse.status()));
// Wait for the finalization to be marked as done.
// 10s timeout should be plenty.
- try {
- await(POLL_MAX_WAIT_MILLIS, POLL_INTERVAL_MILLIS, () -> {
- final UpgradeFinalizer.StatusAndMessages progress =
- omClient.queryUpgradeFinalizationProgress(
- upgradeClientID, false, false);
- return isDone(progress.status());
- });
- } catch (Exception e) {
- fail("Unexpected exception while waiting for "
- + "the OM upgrade to finalize: " + e.getMessage());
- }
+ await(POLL_MAX_WAIT_MILLIS, POLL_INTERVAL_MILLIS, () -> {
+ final UpgradeFinalizer.StatusAndMessages progress =
+ omClient.queryUpgradeFinalizationProgress(
+ upgradeClientID, false, false);
+ return isDone(progress.status());
+ });
}
@AfterAll
diff --git
a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java
b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java
index b2602f7bb0..efe7df3dec 100644
---
a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java
+++
b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestRepeatedOmKeyInfoCodec.java
@@ -39,7 +39,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test {@link RepeatedOmKeyInfo#getCodec(boolean)}.
@@ -87,7 +86,7 @@ public class TestRepeatedOmKeyInfoCodec
}
@Test
- public void test() throws InterruptedException {
+ void test() throws Exception {
threadSafety();
testWithoutPipeline(1);
testWithoutPipeline(2);
@@ -95,39 +94,28 @@ public class TestRepeatedOmKeyInfoCodec
testCompatibility(2);
}
- public void testWithoutPipeline(int chunkNum) {
+ public void testWithoutPipeline(int chunkNum) throws IOException {
final Codec<RepeatedOmKeyInfo> codec = RepeatedOmKeyInfo.getCodec(true);
OmKeyInfo originKey = getKeyInfo(chunkNum);
RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey);
- try {
- byte[] rawData = codec.toPersistedFormat(repeatedOmKeyInfo);
- RepeatedOmKeyInfo key = codec.fromPersistedFormat(rawData);
- System.out.println("Chunk number = " + chunkNum +
- ", Serialized key size without pipeline = " + rawData.length);
- assertNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations()
- .getLocationList().get(0).getPipeline());
- } catch (IOException e) {
- fail("Should success");
- }
+
+ byte[] rawData = codec.toPersistedFormat(repeatedOmKeyInfo);
+ RepeatedOmKeyInfo key = codec.fromPersistedFormat(rawData);
+ assertNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations()
+ .getLocationList().get(0).getPipeline());
}
- public void testCompatibility(int chunkNum) {
+ public void testCompatibility(int chunkNum) throws IOException {
final Codec<RepeatedOmKeyInfo> codecWithoutPipeline
= RepeatedOmKeyInfo.getCodec(true);
final Codec<RepeatedOmKeyInfo> codecWithPipeline
= RepeatedOmKeyInfo.getCodec(false);
OmKeyInfo originKey = getKeyInfo(chunkNum);
RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(originKey);
- try {
- byte[] rawData = codecWithPipeline.toPersistedFormat(repeatedOmKeyInfo);
- RepeatedOmKeyInfo key =
codecWithoutPipeline.fromPersistedFormat(rawData);
- System.out.println("Chunk number = " + chunkNum +
- ", Serialized key size with pipeline = " + rawData.length);
- assertNotNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations()
- .getLocationList().get(0).getPipeline());
- } catch (IOException e) {
- fail("Should success");
- }
+ byte[] rawData = codecWithPipeline.toPersistedFormat(repeatedOmKeyInfo);
+ RepeatedOmKeyInfo key = codecWithoutPipeline.fromPersistedFormat(rawData);
+ assertNotNull(key.getOmKeyInfoList().get(0).getLatestVersionLocations()
+ .getLocationList().get(0).getPipeline());
}
public void threadSafety() throws InterruptedException {
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
index 3844fb45b3..f9c9c5ecc8 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -59,7 +59,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -295,7 +294,7 @@ class TestBucketManagerImpl extends OzoneTestBase {
}
@Test
- public void testDeleteBucket() throws Exception {
+ void testDeleteBucket() throws Exception {
String volume = volumeName();
createSampleVol(volume);
@@ -312,13 +311,9 @@ class TestBucketManagerImpl extends OzoneTestBase {
bucketManager.getBucketInfo(
volume, "bucket-" + i).getBucketName());
}
- try {
- writeClient.deleteBucket(volume, "bucket-1");
- assertNotNull(bucketManager.getBucketInfo(
- volume, "bucket-2"));
- } catch (IOException ex) {
- fail(ex.getMessage());
- }
+ writeClient.deleteBucket(volume, "bucket-1");
+ assertNotNull(bucketManager.getBucketInfo(volume, "bucket-2"));
+
OMException omEx = assertThrows(OMException.class, () -> {
bucketManager.getBucketInfo(volume, "bucket-1");
});
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java
index 23dd15b610..ecada6ea28 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java
@@ -42,7 +42,6 @@ import static
org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Testing OMStorage class.
@@ -103,16 +102,12 @@ public class TestOMStorage {
}
@Test
- public void testSetOmIdOnNotInitializedStorage() throws Exception {
+ void testSetOmIdOnNotInitializedStorage() throws Exception {
OMStorage storage = new OMStorage(configWithOMDBDir());
assertNotEquals(INITIALIZED, storage.getState());
String omId = "omId";
- try {
- storage.setOmId(omId);
- } catch (IOException e) {
- fail("Can not set OmId on a Storage that is not initialized.");
- }
+ storage.setOmId(omId);
assertEquals(omId, storage.getOmId());
assertGetNodeProperties(storage, omId);
}
@@ -145,16 +140,12 @@ public class TestOMStorage {
}
@Test
- public void testSetOmNodeIdOnNotInitializedStorage() throws Exception {
+ void testSetOmNodeIdOnNotInitializedStorage() throws Exception {
OMStorage storage = new OMStorage(configWithOMDBDir());
assertNotEquals(INITIALIZED, storage.getState());
String nodeId = "nodeId";
- try {
- storage.setOmNodeId(nodeId);
- } catch (IOException e) {
- fail("Can not set OmNodeId on a Storage that is not initialized.");
- }
+ storage.setOmNodeId(nodeId);
assertEquals(nodeId, storage.getOmNodeId());
assertGetNodeProperties(storage, null, nodeId);
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
index 58c3da3eb0..9d8de4bbb2 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java
@@ -37,12 +37,10 @@ import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
-import java.io.IOException;
import java.nio.file.Path;
import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This class tests OMVolumeCreateResponse.
@@ -110,7 +108,7 @@ public class TestOMVolumeCreateResponse {
}
@Test
- public void testAddToDBBatchNoOp() throws Exception {
+ void testAddToDBBatchNoOp() throws Exception {
OMResponse omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
@@ -122,15 +120,10 @@ public class TestOMVolumeCreateResponse {
OMVolumeCreateResponse omVolumeCreateResponse =
new OMVolumeCreateResponse(omResponse);
- try {
- omVolumeCreateResponse.checkAndUpdateDB(omMetadataManager,
- batchOperation);
- assertEquals(0, omMetadataManager.countRowsInTable(
- omMetadataManager.getVolumeTable()));
- } catch (IOException ex) {
- fail("testAddToDBBatchFailure failed");
- }
-
+ omVolumeCreateResponse.checkAndUpdateDB(omMetadataManager,
+ batchOperation);
+ assertEquals(0, omMetadataManager.countRowsInTable(
+ omMetadataManager.getVolumeTable()));
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
index 448061d399..aa640067ca 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java
@@ -37,12 +37,10 @@ import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
-import java.io.IOException;
import java.nio.file.Path;
import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This class tests OMVolumeCreateResponse.
@@ -140,7 +138,7 @@ public class TestOMVolumeSetOwnerResponse {
}
@Test
- public void testAddToDBBatchNoOp() throws Exception {
+ void testAddToDBBatchNoOp() throws Exception {
OMResponse omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
@@ -152,15 +150,10 @@ public class TestOMVolumeSetOwnerResponse {
OMVolumeSetOwnerResponse omVolumeSetOwnerResponse =
new OMVolumeSetOwnerResponse(omResponse);
- try {
- omVolumeSetOwnerResponse.checkAndUpdateDB(omMetadataManager,
- batchOperation);
- assertEquals(0, omMetadataManager.countRowsInTable(
- omMetadataManager.getVolumeTable()));
- } catch (IOException ex) {
- fail("testAddToDBBatchFailure failed");
- }
-
+ omVolumeSetOwnerResponse.checkAndUpdateDB(omMetadataManager,
+ batchOperation);
+ assertEquals(0, omMetadataManager.countRowsInTable(
+ omMetadataManager.getVolumeTable()));
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java
index fc4121b3a3..fbc8e3c944 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java
@@ -36,12 +36,10 @@ import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
-import java.io.IOException;
import java.nio.file.Path;
import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This class tests OMVolumeCreateResponse.
@@ -108,7 +106,7 @@ public class TestOMVolumeSetQuotaResponse {
}
@Test
- public void testAddToDBBatchNoOp() throws Exception {
+ void testAddToDBBatchNoOp() throws Exception {
OMResponse omResponse = OMResponse.newBuilder()
.setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
@@ -120,14 +118,10 @@ public class TestOMVolumeSetQuotaResponse {
OMVolumeSetQuotaResponse omVolumeSetQuotaResponse =
new OMVolumeSetQuotaResponse(omResponse);
- try {
- omVolumeSetQuotaResponse.checkAndUpdateDB(omMetadataManager,
- batchOperation);
- assertEquals(0, omMetadataManager.countRowsInTable(
- omMetadataManager.getVolumeTable()));
- } catch (IOException ex) {
- fail("testAddToDBBatchFailure failed");
- }
+ omVolumeSetQuotaResponse.checkAndUpdateDB(omMetadataManager,
+ batchOperation);
+ assertEquals(0, omMetadataManager.countRowsInTable(
+ omMetadataManager.getVolumeTable()));
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
index a4e4447c7d..c09c51a624 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.security;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
import javax.crypto.KeyGenerator;
import javax.crypto.Mac;
import javax.crypto.SecretKey;
@@ -298,7 +297,7 @@ public class TestOzoneTokenIdentifier {
}
@Test
- public void testTokenPersistence() throws IOException {
+ void testTokenPersistence() throws IOException {
OzoneTokenIdentifier idWrite = getIdentifierInst();
idWrite.setOmServiceId("defaultServiceId");
@@ -306,11 +305,7 @@ public class TestOzoneTokenIdentifier {
Codec<OzoneTokenIdentifier> idCodec = TokenIdentifierCodec.get();
OzoneTokenIdentifier idRead = null;
- try {
- idRead = idCodec.fromPersistedFormat(oldIdBytes);
- } catch (IOException ex) {
- fail("Should not fail to load old token format");
- }
+ idRead = idCodec.fromPersistedFormat(oldIdBytes);
assertEquals(idWrite, idRead,
"Deserialize Serialized Token should equal.");
}
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java
index 998b5c8cfc..c42036cb1a 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java
@@ -131,11 +131,11 @@ public class TestAuthorizationFilter {
@SuppressWarnings("checkstyle:ParameterNumber")
@ParameterizedTest
@MethodSource("testAuthFilterFailuresInput")
- public void testAuthFilterFailures(
+ void testAuthFilterFailures(
String method, String authHeader, String contentMd5,
String host, String amzContentSha256, String date, String contentType,
String path, String expectedErrorMsg
- ) {
+ ) throws Exception {
try {
ContainerRequestContext context = setupContext(method, authHeader,
contentMd5, host, amzContentSha256, date, contentType, path);
@@ -169,8 +169,6 @@ public class TestAuthorizationFilter {
}
- } catch (Exception ex) {
- fail("Unexpected exception: " + ex);
}
}
@@ -238,58 +236,54 @@ public class TestAuthorizationFilter {
@SuppressWarnings("checkstyle:ParameterNumber")
@ParameterizedTest
@MethodSource("testAuthFilterInput")
- public void testAuthFilter(
+ void testAuthFilter(
String method, String authHeader, String contentMd5,
String host, String amzContentSha256, String date, String contentType,
String path
- ) {
- try {
- ContainerRequestContext context = setupContext(method, authHeader,
- contentMd5, host, amzContentSha256, date, contentType, path);
-
- AWSSignatureProcessor awsSignatureProcessor = new
AWSSignatureProcessor();
- awsSignatureProcessor.setContext(context);
-
- SignatureInfo signatureInfo = new SignatureInfo();
-
- authorizationFilter.setSignatureParser(awsSignatureProcessor);
- authorizationFilter.setSignatureInfo(signatureInfo);
-
- authorizationFilter.filter(context);
-
- if (path.startsWith("/secret")) {
- assertNull(authorizationFilter.getSignatureInfo().getUnfilteredURI());
-
- assertNull(authorizationFilter.getSignatureInfo().getStringToSign());
- } else {
- String canonicalRequest = method + "\n"
- + path + "\n"
- + "\n"
- + "host:" + host + "\nx-amz-content-sha256:" + amzContentSha256 +
- "\n"
- + "x-amz-date:" + DATETIME + "\n"
- + "\n"
- + "host;x-amz-content-sha256;x-amz-date\n"
- + amzContentSha256;
-
- MessageDigest md = MessageDigest.getInstance("SHA-256");
- md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8));
-
- String expectedStrToSign = "AWS4-HMAC-SHA256\n"
- + DATETIME + "\n"
- + CURDATE + "/us-east-1/s3/aws4_request\n"
- + Hex.encode(md.digest()).toLowerCase();
-
- assertEquals(path,
- authorizationFilter.getSignatureInfo().getUnfilteredURI(),
- "Unfiltered URI is not preserved");
-
- assertEquals(expectedStrToSign,
- authorizationFilter.getSignatureInfo().getStringToSign(),
- "String to sign is invalid");
- }
- } catch (Exception ex) {
- fail("Unexpected exception: " + ex);
+ ) throws Exception {
+ ContainerRequestContext context = setupContext(method, authHeader,
+ contentMd5, host, amzContentSha256, date, contentType, path);
+
+ AWSSignatureProcessor awsSignatureProcessor = new AWSSignatureProcessor();
+ awsSignatureProcessor.setContext(context);
+
+ SignatureInfo signatureInfo = new SignatureInfo();
+
+ authorizationFilter.setSignatureParser(awsSignatureProcessor);
+ authorizationFilter.setSignatureInfo(signatureInfo);
+
+ authorizationFilter.filter(context);
+
+ if (path.startsWith("/secret")) {
+ assertNull(authorizationFilter.getSignatureInfo().getUnfilteredURI());
+
+ assertNull(authorizationFilter.getSignatureInfo().getStringToSign());
+ } else {
+ String canonicalRequest = method + "\n"
+ + path + "\n"
+ + "\n"
+ + "host:" + host + "\nx-amz-content-sha256:" + amzContentSha256 +
+ "\n"
+ + "x-amz-date:" + DATETIME + "\n"
+ + "\n"
+ + "host;x-amz-content-sha256;x-amz-date\n"
+ + amzContentSha256;
+
+ MessageDigest md = MessageDigest.getInstance("SHA-256");
+ md.update(canonicalRequest.getBytes(StandardCharsets.UTF_8));
+
+ String expectedStrToSign = "AWS4-HMAC-SHA256\n"
+ + DATETIME + "\n"
+ + CURDATE + "/us-east-1/s3/aws4_request\n"
+ + Hex.encode(md.digest()).toLowerCase();
+
+ assertEquals(path,
+ authorizationFilter.getSignatureInfo().getUnfilteredURI(),
+ "Unfiltered URI is not preserved");
+
+ assertEquals(expectedStrToSign,
+ authorizationFilter.getSignatureInfo().getStringToSign(),
+ "String to sign is invalid");
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]