This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 2ac838ece8 HDDS-10069. Add static import for assertions and mocks in
client integration tests (#5955)
2ac838ece8 is described below
commit 2ac838ece8a9fb08a3f02c4276815553e3f7448a
Author: Zhaohui Wang <[email protected]>
AuthorDate: Tue Jan 9 16:46:50 2024 +0800
HDDS-10069. Add static import for assertions and mocks in client
integration tests (#5955)
---
.../ozone/client/TestOzoneClientFactory.java | 4 +-
.../client/rpc/AbstractTestECKeyOutputStream.java | 32 ++---
.../ozone/client/rpc/Test2WayCommitInRatis.java | 16 +--
.../apache/hadoop/ozone/client/rpc/TestBCSID.java | 11 +-
.../client/rpc/TestBlockDataStreamOutput.java | 15 +--
.../rpc/TestCloseContainerHandlingByClient.java | 48 ++++----
.../rpc/TestContainerReplicationEndToEnd.java | 13 +-
.../client/rpc/TestContainerStateMachine.java | 14 ++-
.../TestContainerStateMachineFailureOnRead.java | 13 +-
.../rpc/TestContainerStateMachineFlushDelay.java | 7 +-
.../rpc/TestContainerStateMachineStream.java | 6 +-
.../client/rpc/TestDeleteWithInAdequateDN.java | 26 ++--
.../client/rpc/TestDiscardPreallocatedBlocks.java | 18 +--
.../rpc/TestFailureHandlingByClientFlushDelay.java | 21 ++--
.../client/rpc/TestHybridPipelineOnDatanode.java | 20 +--
.../rpc/TestMultiBlockWritesWithDnFailures.java | 15 +--
.../client/rpc/TestOzoneAtRestEncryption.java | 7 +-
.../rpc/TestOzoneClientMultipartUploadWithFSO.java | 135 +++++++++++----------
...estOzoneClientRetriesOnExceptionFlushDelay.java | 17 +--
.../rpc/TestOzoneClientRetriesOnExceptions.java | 36 +++---
.../rpc/TestOzoneRpcClientForAclAuditLog.java | 5 +-
.../client/rpc/TestValidateBCSIDOnRestart.java | 23 ++--
22 files changed, 265 insertions(+), 237 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
index 70ccf28945..5e3c3ab5a7 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
@@ -16,11 +16,11 @@
*/
package org.apache.hadoop.ozone.client;
+import static org.junit.jupiter.api.Assertions.fail;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import java.io.IOException;
@@ -66,7 +66,7 @@ public class TestOzoneClientFactory {
return null;
}
});
- Assertions.fail("Should throw exception here");
+ fail("Should throw exception here");
} catch (IOException | InterruptedException e) {
assert e instanceof AccessControlException;
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
index 9691a31efb..2bbee25f97 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -62,6 +61,11 @@ import java.util.concurrent.TimeoutException;
import static java.nio.charset.StandardCharsets.UTF_8;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests key output stream.
@@ -154,7 +158,7 @@ abstract class AbstractTestECKeyOutputStream {
.createKey(keyString, new ECReplicationConfig(3, 2,
ECReplicationConfig.EcCodec.RS, chunkSize), inputSize,
objectStore, volumeName, bucketName)) {
- Assertions.assertTrue(key.getOutputStream() instanceof
ECKeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof ECKeyOutputStream);
}
}
@@ -163,7 +167,7 @@ abstract class AbstractTestECKeyOutputStream {
OzoneVolume volume = objectStore.getVolume(volumeName);
OzoneBucket bucket = volume.getBucket(bucketName);
try (OzoneOutputStream out = bucket.createKey("myKey", inputSize)) {
- Assertions.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(out.getOutputStream() instanceof KeyOutputStream);
for (byte[] inputChunk : inputChunks) {
out.write(inputChunk);
}
@@ -184,7 +188,7 @@ abstract class AbstractTestECKeyOutputStream {
OzoneBucket bucket = volume.getBucket(myBucket);
try (OzoneOutputStream out = bucket.createKey(keyString, inputSize)) {
- Assertions.assertTrue(out.getOutputStream() instanceof
ECKeyOutputStream);
+ assertTrue(out.getOutputStream() instanceof ECKeyOutputStream);
for (byte[] inputChunk : inputChunks) {
out.write(inputChunk);
}
@@ -193,8 +197,8 @@ abstract class AbstractTestECKeyOutputStream {
try (OzoneInputStream in = bucket.readKey(keyString)) {
for (byte[] inputChunk : inputChunks) {
int read = in.read(buf, 0, chunkSize);
- Assertions.assertEquals(chunkSize, read);
- Assertions.assertArrayEquals(buf, inputChunk);
+ assertEquals(chunkSize, read);
+ assertArrayEquals(buf, inputChunk);
}
}
}
@@ -245,7 +249,7 @@ abstract class AbstractTestECKeyOutputStream {
}
}
OzoneKeyDetails key = bucket.getKey(keyName);
- Assertions.assertEquals(replicationConfig, key.getReplicationConfig());
+ assertEquals(replicationConfig, key.getReplicationConfig());
}
@Test
@@ -255,7 +259,7 @@ abstract class AbstractTestECKeyOutputStream {
"testCreateRatisKeyAndWithECBucketDefaults", 2000,
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE),
new HashMap<>())) {
- Assertions.assertTrue(out.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(out.getOutputStream() instanceof KeyOutputStream);
for (byte[] inputChunk : inputChunks) {
out.write(inputChunk);
}
@@ -344,7 +348,7 @@ abstract class AbstractTestECKeyOutputStream {
.getNumberOfKeys() == 1) && (containerOperationClient
.getContainerReplicas(currentKeyContainerID).size() == 5);
} catch (IOException exception) {
- Assertions.fail("Unexpected exception " + exception);
+ fail("Unexpected exception " + exception);
return false;
}
}, 100, 10000);
@@ -361,8 +365,8 @@ abstract class AbstractTestECKeyOutputStream {
OzoneKey key) throws IOException {
try (OzoneInputStream is = bucket.readKey(key.getName())) {
byte[] fileContent = new byte[length];
- Assertions.assertEquals(length, is.read(fileContent));
- Assertions.assertEquals(new String(Arrays.copyOfRange(inputData, offset,
+ assertEquals(length, is.read(fileContent));
+ assertEquals(new String(Arrays.copyOfRange(inputData, offset,
offset + length), UTF_8),
new String(fileContent, UTF_8));
}
@@ -423,7 +427,7 @@ abstract class AbstractTestECKeyOutputStream {
// Check the second blockGroup pipeline to make sure that the failed
// node is not selected.
- Assertions.assertFalse(ecOut.getStreamEntries()
+ assertFalse(ecOut.getStreamEntries()
.get(1).getPipeline().getNodes().contains(nodeToKill));
}
@@ -432,8 +436,8 @@ abstract class AbstractTestECKeyOutputStream {
// data comes back.
for (int i = 0; i < 2; i++) {
byte[] fileContent = new byte[inputData.length];
- Assertions.assertEquals(inputData.length, is.read(fileContent));
- Assertions.assertEquals(new String(inputData, UTF_8),
+ assertEquals(inputData.length, is.read(fileContent));
+ assertEquals(new String(inputData, UTF_8),
new String(fileContent, UTF_8));
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
index 8e87f6207f..f2a5d75848 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -51,6 +50,8 @@ import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.
OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* This class tests the 2 way commit in Ratis.
@@ -145,8 +146,8 @@ public class Test2WayCommitInRatis {
HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
XceiverClientSpi xceiverClient = clientManager
.acquireClient(container1.getPipeline());
- Assertions.assertEquals(1, xceiverClient.getRefcount());
- Assertions.assertEquals(container1.getPipeline(),
+ assertEquals(1, xceiverClient.getRefcount());
+ assertEquals(container1.getPipeline(),
xceiverClient.getPipeline());
Pipeline pipeline = xceiverClient.getPipeline();
XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
@@ -155,7 +156,7 @@ public class Test2WayCommitInRatis {
container1.getContainerInfo().getContainerID(),
xceiverClient.getPipeline()));
reply.getResponse().get();
- Assertions.assertEquals(3, ratisClient.getCommitInfoMap().size());
+ assertEquals(3, ratisClient.getCommitInfoMap().size());
// wait for the container to be created on all the nodes
xceiverClient.watchForCommit(reply.getLogIndex());
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
@@ -172,11 +173,10 @@ public class Test2WayCommitInRatis {
xceiverClient.watchForCommit(reply.getLogIndex());
// commitInfo Map will be reduced to 2 here
- Assertions.assertEquals(2, ratisClient.getCommitInfoMap().size());
+ assertEquals(2, ratisClient.getCommitInfoMap().size());
clientManager.releaseClient(xceiverClient, false);
- Assertions.assertTrue(logCapturer.getOutput().contains("3 way commit
failed"));
- Assertions
- .assertTrue(logCapturer.getOutput().contains("Committed by majority"));
+ assertTrue(logCapturer.getOutput().contains("3 way commit failed"));
+ assertTrue(logCapturer.getOutput().contains("Committed by majority"));
logCapturer.stopCapturing();
shutdown();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
index 1917cf68fd..4245f460be 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -53,6 +52,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests the validity BCSID of a container.
@@ -130,7 +131,7 @@ public class TestBCSID {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
List<OmKeyLocationInfo> keyLocationInfos =
keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
- Assertions.assertEquals(1, keyLocationInfos.size());
+ assertEquals(1, keyLocationInfos.size());
OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(0);
long blockCommitSequenceId =
@@ -138,16 +139,16 @@ public class TestBCSID {
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerReport().getBlockCommitSequenceId();
- Assertions.assertTrue(blockCommitSequenceId > 0);
+ assertTrue(blockCommitSequenceId > 0);
// make sure the persisted block Id in OM is same as that seen in the
// container report to be reported to SCM.
- Assertions.assertEquals(blockCommitSequenceId,
+ assertEquals(blockCommitSequenceId,
omKeyLocationInfo.getBlockCommitSequenceId());
// verify that on restarting the datanode, it reloads the BCSID correctly.
cluster.restartHddsDatanode(0, true);
- Assertions.assertEquals(blockCommitSequenceId,
+ assertEquals(blockCommitSequenceId,
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index a77218d891..017b0fb583 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -38,7 +38,6 @@ import
org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.TestHelper;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -50,6 +49,8 @@ import java.util.concurrent.TimeUnit;
import static java.nio.charset.StandardCharsets.UTF_8;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests BlockDataStreamOutput class.
@@ -176,7 +177,7 @@ public class TestBlockDataStreamOutput {
(KeyDataStreamOutput) key.getByteBufStreamOutput();
ByteBufferStreamOutput stream =
keyDataStreamOutput.getStreamEntries().get(0).getByteBufStreamOutput();
- Assertions.assertTrue(stream instanceof BlockDataStreamOutput);
+ assertTrue(stream instanceof BlockDataStreamOutput);
TestHelper.waitForContainerClose(key, cluster);
key.write(b);
key.close();
@@ -200,13 +201,13 @@ public class TestBlockDataStreamOutput {
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
.getBytes(UTF_8);
key.write(ByteBuffer.wrap(data));
- Assertions.assertTrue(
+ assertTrue(
metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
<= pendingPutBlockCount + 1);
key.close();
// Since data length is 500 , first putBlock will be at 400(flush boundary)
// and the other at 500
- Assertions.assertEquals(
+ assertEquals(
metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock),
putBlockCount + 2);
validateData(keyName, data);
@@ -237,10 +238,10 @@ public class TestBlockDataStreamOutput {
.getBytes(UTF_8);
key.write(ByteBuffer.wrap(data));
// minPacketSize= 100, so first write of 50 wont trigger a writeChunk
- Assertions.assertEquals(writeChunkCount,
+ assertEquals(writeChunkCount,
metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
key.write(ByteBuffer.wrap(data));
- Assertions.assertEquals(writeChunkCount + 1,
+ assertEquals(writeChunkCount + 1,
metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
// now close the stream, It will update the key length.
key.close();
@@ -263,7 +264,7 @@ public class TestBlockDataStreamOutput {
keyDataStreamOutput.getStreamEntries().get(0);
key.write(ByteBuffer.wrap(data));
key.close();
- Assertions.assertEquals(dataLength, stream.getTotalAckDataLength());
+ assertEquals(dataLength, stream.getTotalAckDataLength());
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 63c9b275b3..e3353b9bf0 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -53,8 +53,10 @@ import static java.nio.charset.StandardCharsets.UTF_8;
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -133,7 +135,7 @@ public class TestCloseContainerHandlingByClient {
.getBytes(UTF_8);
key.write(data);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -148,7 +150,7 @@ public class TestCloseContainerHandlingByClient {
// read the key from OM again and match the length.The length will still
// be the equal to the original data size.
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assertions.assertEquals(2 * data.length, keyInfo.getDataSize());
+ assertEquals(2 * data.length, keyInfo.getDataSize());
// Written the same data twice
String dataString = new String(data, UTF_8);
@@ -166,7 +168,7 @@ public class TestCloseContainerHandlingByClient {
.getBytes(UTF_8);
key.write(data);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -179,7 +181,7 @@ public class TestCloseContainerHandlingByClient {
// read the key from OM again and match the length.The length will still
// be the equal to the original data size.
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assertions.assertEquals(data.length, keyInfo.getDataSize());
+ assertEquals(data.length, keyInfo.getDataSize());
validateData(keyName, data);
}
@@ -192,15 +194,15 @@ public class TestCloseContainerHandlingByClient {
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
// With the initial size provided, it should have preallocated 4 blocks
- Assertions.assertEquals(3, keyOutputStream.getStreamEntries().size());
+ assertEquals(3, keyOutputStream.getStreamEntries().size());
// write data more than 1 block
byte[] data =
ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize))
.getBytes(UTF_8);
- Assertions.assertEquals(data.length, 3 * blockSize);
+ assertEquals(data.length, 3 * blockSize);
key.write(data);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -224,10 +226,10 @@ public class TestCloseContainerHandlingByClient {
// closeContainerException and remaining data in the chunkOutputStream
// buffer will be copied into a different allocated block and will be
// committed.
- Assertions.assertEquals(4, keyLocationInfos.size());
- Assertions.assertEquals(4 * blockSize, keyInfo.getDataSize());
+ assertEquals(4, keyLocationInfos.size());
+ assertEquals(4 * blockSize, keyInfo.getDataSize());
for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
- Assertions.assertEquals(blockSize, locationInfo.getLength());
+ assertEquals(blockSize, locationInfo.getLength());
}
}
@@ -239,9 +241,9 @@ public class TestCloseContainerHandlingByClient {
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
// With the initial size provided, it should have pre allocated 2 blocks
- Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
+ assertEquals(2, keyOutputStream.getStreamEntries().size());
String dataString =
ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize));
byte[] data = dataString.getBytes(UTF_8);
@@ -281,7 +283,7 @@ public class TestCloseContainerHandlingByClient {
String dataCommitted =
dataString.concat(dataString2).concat(dataString3).concat(dataString4);
- Assertions.assertEquals(dataCommitted.getBytes(UTF_8).length,
+ assertEquals(dataCommitted.getBytes(UTF_8).length,
keyInfo.getDataSize());
validateData(keyName, dataCommitted.getBytes(UTF_8));
}
@@ -295,16 +297,16 @@ public class TestCloseContainerHandlingByClient {
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
// With the initial size provided, it should have preallocated 4 blocks
- Assertions.assertEquals(4, keyOutputStream.getStreamEntries().size());
+ assertEquals(4, keyOutputStream.getStreamEntries().size());
// write data 4 blocks and one more chunk
byte[] writtenData =
ContainerTestHelper.getFixedLengthString(keyString, keyLen)
.getBytes(UTF_8);
byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize +
chunkSize);
- Assertions.assertEquals(data.length, 3 * blockSize + chunkSize);
+ assertEquals(data.length, 3 * blockSize + chunkSize);
key.write(data);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -329,7 +331,7 @@ public class TestCloseContainerHandlingByClient {
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
inputStream.read(readData);
}
- Assertions.assertArrayEquals(writtenData, readData);
+ assertArrayEquals(writtenData, readData);
// Though we have written only block initially, the close will hit
// closeContainerException and remaining data in the chunkOutputStream
@@ -339,7 +341,7 @@ public class TestCloseContainerHandlingByClient {
for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
length += locationInfo.getLength();
}
- Assertions.assertEquals(4 * blockSize, length);
+ assertEquals(4 * blockSize, length);
}
private void waitForContainerClose(OzoneOutputStream outputStream)
@@ -375,7 +377,7 @@ public class TestCloseContainerHandlingByClient {
.setKeyName(keyName)
.build();
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
waitForContainerClose(key);
// Again Write the Data. This will throw an exception which will be handled
// and new blocks will be allocated
@@ -387,7 +389,7 @@ public class TestCloseContainerHandlingByClient {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
String dataString = new String(data, UTF_8);
dataString = dataString.concat(dataString);
- Assertions.assertEquals(2 * data.length, keyInfo.getDataSize());
+ assertEquals(2 * data.length, keyInfo.getDataSize());
validateData(keyName, dataString.getBytes(UTF_8));
}
@@ -401,7 +403,7 @@ public class TestCloseContainerHandlingByClient {
.getBytes(UTF_8);
key.write(data1);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
//get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -419,7 +421,7 @@ public class TestCloseContainerHandlingByClient {
// read the key from OM again and match the length.The length will still
// be the equal to the original data size.
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assertions.assertEquals((long) 5 * chunkSize, keyInfo.getDataSize());
+ assertEquals((long) 5 * chunkSize, keyInfo.getDataSize());
// Written the same data twice
String dataString = new String(data1, UTF_8);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index 9a351e77e9..d012bfd524 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.LoggerFactory;
@@ -63,6 +62,10 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERV
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests delete key operation with a slow follower in the datanode
@@ -163,7 +166,7 @@ public class TestContainerReplicationEndToEnd {
KeyOutputStream groupOutputStream = (KeyOutputStream)
key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assertions.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
long containerID = omKeyLocationInfo.getContainerID();
PipelineID pipelineID =
@@ -205,7 +208,7 @@ public class TestContainerReplicationEndToEnd {
}
// wait for container to move to closed state in SCM
Thread.sleep(2 * containerReportInterval);
- Assertions.assertSame(
+ assertSame(
cluster.getStorageContainerManager().getContainerInfo(containerID)
.getState(), HddsProtos.LifeCycleState.CLOSED);
// shutdown the replica node
@@ -221,14 +224,14 @@ public class TestContainerReplicationEndToEnd {
}
}
- Assertions.assertNotNull(dnService);
+ assertNotNull(dnService);
final HddsDatanodeService newReplicaNode = dnService;
// wait for the container to get replicated
GenericTestUtils.waitFor(() -> {
return newReplicaNode.getDatanodeStateMachine().getContainer()
.getContainerSet().getContainer(containerID) != null;
}, 500, 100000);
-
Assertions.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer()
+ assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer()
.getContainerSet().getContainer(containerID).getContainerData()
.getBlockCommitSequenceId() > 0);
// wait for SCM to update the replica Map
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
index 1050fdd7f2..f1aae8e4d0 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
@@ -54,11 +54,13 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -156,7 +158,7 @@ public class TestContainerStateMachine {
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assertions.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
// delete the container dir
@@ -168,7 +170,7 @@ public class TestContainerStateMachine {
key.close();
// Make sure the container is marked unhealthy
- Assertions.assertEquals(
+ assertEquals(
ContainerProtos.ContainerDataProto.State.UNHEALTHY,
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet()
@@ -183,7 +185,7 @@ public class TestContainerStateMachine {
(ContainerStateMachine) TestHelper.getStateMachine(cluster);
SimpleStateMachineStorage storage =
(SimpleStateMachineStorage) stateMachine.getStateMachineStorage();
-
Assertions.assertNull(StatemachineImplTestUtil.findLatestSnapshot(storage));
+ assertNull(StatemachineImplTestUtil.findLatestSnapshot(storage));
// Write 10 keys. Num snapshots should be equal to config value.
for (int i = 1; i <= 10; i++) {
@@ -207,7 +209,7 @@ public class TestContainerStateMachine {
storage = (SimpleStateMachineStorage)
stateMachine.getStateMachineStorage();
Path parentPath = getSnapshotPath(storage);
int numSnapshots = parentPath.getParent().toFile().listFiles().length;
- Assertions.assertTrue(Math.abs(ratisServerConfiguration
+ assertTrue(Math.abs(ratisServerConfiguration
.getNumSnapshotsRetained() - numSnapshots) <= 1);
// Write 10 more keys. Num Snapshots should remain the same.
@@ -228,7 +230,7 @@ public class TestContainerStateMachine {
storage = (SimpleStateMachineStorage)
stateMachine.getStateMachineStorage();
parentPath = getSnapshotPath(storage);
numSnapshots = parentPath.getParent().toFile().listFiles().length;
- Assertions.assertTrue(Math.abs(ratisServerConfiguration
+ assertTrue(Math.abs(ratisServerConfiguration
.getNumSnapshotsRetained() - numSnapshots) <= 1);
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java
index 06f3ef625f..1d0f25b3a0 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java
@@ -55,11 +55,12 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVA
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.ratis.grpc.server.GrpcLogAppender;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -145,7 +146,7 @@ public class TestContainerStateMachineFailureOnRead {
cluster.getStorageContainerManager().getPipelineManager()
.getPipelines(RatisReplicationConfig.getInstance(
HddsProtos.ReplicationFactor.THREE));
- Assertions.assertEquals(1, pipelines.size());
+ assertEquals(1, pipelines.size());
Pipeline ratisPipeline = pipelines.iterator().next();
Optional<HddsDatanodeService> dnToStop =
@@ -159,7 +160,7 @@ public class TestContainerStateMachineFailureOnRead {
}
}).findFirst();
- Assertions.assertTrue(dnToStop.isPresent());
+ assertTrue(dnToStop.isPresent());
cluster.shutdownHddsDatanode(dnToStop.get().getDatanodeDetails());
// Verify healthy pipeline before creating key
try (XceiverClientRatis xceiverClientRatis =
@@ -182,7 +183,7 @@ public class TestContainerStateMachineFailureOnRead {
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assertions.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
omKeyLocationInfo = locationInfoList.get(0);
key.close();
groupOutputStream.close();
@@ -197,7 +198,7 @@ public class TestContainerStateMachineFailureOnRead {
}
}).findFirst();
- Assertions.assertTrue(leaderDn.isPresent());
+ assertTrue(leaderDn.isPresent());
// delete the container dir from leader
FileUtil.fullyDelete(new File(
leaderDn.get().getDatanodeStateMachine()
@@ -214,7 +215,7 @@ public class TestContainerStateMachineFailureOnRead {
try {
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(pipelines.get(0).getId());
- Assertions.assertEquals(Pipeline.PipelineState.CLOSED,
pipeline.getPipelineState(),
+ assertEquals(Pipeline.PipelineState.CLOSED, pipeline.getPipelineState(),
"Pipeline " + pipeline.getId() + "should be in CLOSED state");
} catch (PipelineNotFoundException e) {
// do nothing
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index fafba729e0..32fc9ba5c9 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -57,6 +56,8 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
/**
* Tests the containerStateMachine failure handling by set flush delay.
@@ -164,7 +165,7 @@ public class TestContainerStateMachineFlushDelay {
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assertions.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
// delete the container dir
@@ -176,7 +177,7 @@ public class TestContainerStateMachineFlushDelay {
key.close();
// Make sure the container is marked unhealthy
- Assertions.assertSame(
+ assertSame(
cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
index ccb3fc992c..ab2fbeadb6 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.Test;
@@ -53,6 +52,7 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERV
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Tests the containerStateMachine stream handling.
@@ -177,7 +177,7 @@ public class TestContainerStateMachineStream {
.getContainer(omKeyLocationInfo.getContainerID()).
getContainerData().getBytesUsed();
- Assertions.assertEquals(bytesUsed, size);
+ assertEquals(bytesUsed, size);
}
@@ -208,7 +208,7 @@ public class TestContainerStateMachineStream {
.getContainer(omKeyLocationInfo.getContainerID()).
getContainerData().getBytesUsed();
- Assertions.assertEquals(bytesUsed, size);
+ assertEquals(bytesUsed, size);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index c9b1f7c170..cd4f1c4ae9 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -69,8 +69,12 @@ import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -233,8 +237,8 @@ public class TestDeleteWithInAdequateDN {
leader = dn;
}
}
- Assertions.assertNotNull(follower);
- Assertions.assertNotNull(leader);
+ assertNotNull(follower);
+ assertNotNull(leader);
//ensure that the chosen follower is still a follower
Assumptions.assumeTrue(RatisTestHelper.isRatisFollower(follower,
pipeline));
// shutdown the follower node
@@ -291,12 +295,11 @@ public class TestDeleteWithInAdequateDN {
null);
}
} catch (IOException ioe) {
- Assertions.fail("Exception should not be thrown.");
+ fail("Exception should not be thrown.");
}
long numReadStateMachineOps =
stateMachine.getMetrics().getNumReadStateMachineOps();
- Assertions.assertEquals(0,
- stateMachine.getMetrics().getNumReadStateMachineFails());
+ assertEquals(0, stateMachine.getMetrics().getNumReadStateMachineFails());
stateMachine.evictStateMachineCache();
cluster.restartHddsDatanode(follower.getDatanodeDetails(), false);
// wait for the raft server to come up and join the ratis ring
@@ -304,10 +307,9 @@ public class TestDeleteWithInAdequateDN {
// Make sure the readStateMachine call got triggered after the follower
// caught up
- Assertions.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps()
+ assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps()
> numReadStateMachineOps);
- Assertions.assertEquals(0,
- stateMachine.getMetrics().getNumReadStateMachineFails());
+ assertEquals(0, stateMachine.getMetrics().getNumReadStateMachineFails());
// wait for the chunk to get deleted now
Thread.sleep(10000);
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
@@ -321,10 +323,10 @@ public class TestDeleteWithInAdequateDN {
keyValueHandler.getChunkManager().readChunk(container, blockID,
ChunkInfo.getFromProtoBuf(chunkInfo), null);
}
- Assertions.fail("Expected exception is not thrown");
+ fail("Expected exception is not thrown");
} catch (IOException ioe) {
- Assertions.assertTrue(ioe instanceof StorageContainerException);
- Assertions.assertSame(((StorageContainerException) ioe).getResult(),
+ assertTrue(ioe instanceof StorageContainerException);
+ assertSame(((StorageContainerException) ioe).getResult(),
ContainerProtos.Result.UNABLE_TO_FIND_CHUNK);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index 550c1841b3..505dab409f 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -49,8 +49,10 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import static java.nio.charset.StandardCharsets.UTF_8;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -128,14 +130,14 @@ public class TestDiscardPreallocatedBlocks {
createKey(keyName, ReplicationType.RATIS, 2 * blockSize);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
// With the initial size provided, it should have pre allocated 2 blocks
- Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
+ assertEquals(2, keyOutputStream.getStreamEntries().size());
long containerID1 = keyOutputStream.getStreamEntries().get(0)
.getBlockID().getContainerID();
long containerID2 = keyOutputStream.getStreamEntries().get(1)
.getBlockID().getContainerID();
- Assertions.assertEquals(containerID1, containerID2);
+ assertEquals(containerID1, containerID2);
String dataString =
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
byte[] data = dataString.getBytes(UTF_8);
@@ -152,20 +154,20 @@ public class TestDiscardPreallocatedBlocks {
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
- Assertions.assertEquals(3, datanodes.size());
+ assertEquals(3, datanodes.size());
waitForContainerClose(key);
dataString =
ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize));
data = dataString.getBytes(UTF_8);
key.write(data);
- Assertions.assertEquals(3, keyOutputStream.getStreamEntries().size());
+ assertEquals(3, keyOutputStream.getStreamEntries().size());
// the 1st block got written. Now all the containers are closed, so the 2nd
// pre allocated block will be removed from the list and new block should
// have been allocated
- Assertions.assertEquals(
+ assertEquals(
keyOutputStream.getLocationInfoList().get(0).getBlockID(),
locationInfos.get(0).getBlockID());
- Assertions.assertNotEquals(locationStreamInfos.get(1).getBlockID(),
+ assertNotEquals(locationStreamInfos.get(1).getBlockID(),
keyOutputStream.getLocationInfoList().get(1).getBlockID());
key.close();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index 3d10661f69..8af60c4e80 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.container.TestHelper;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -62,6 +61,9 @@ import java.util.concurrent.TimeUnit;
import static java.nio.charset.StandardCharsets.UTF_8;
import static
org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests Exception handling by Ozone Client by set flush delay.
@@ -170,14 +172,14 @@ public class TestFailureHandlingByClientFlushDelay {
.getFixedLengthString(keyString, chunkSize);
// get the name of a valid container
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> streamEntryList =
keyOutputStream.getStreamEntries();
// Assert that 1 block will be preallocated
- Assertions.assertEquals(1, streamEntryList.size());
+ assertEquals(1, streamEntryList.size());
key.write(data.getBytes(UTF_8));
key.flush();
long containerId = streamEntryList.get(0).getBlockID().getContainerID();
@@ -197,12 +199,9 @@ public class TestFailureHandlingByClientFlushDelay {
key.write(data.getBytes(UTF_8));
key.flush();
- Assertions.assertTrue(
- keyOutputStream.getExcludeList().getContainerIds().isEmpty());
- Assertions.assertTrue(
- keyOutputStream.getExcludeList().getDatanodes().isEmpty());
- Assertions.assertTrue(
- keyOutputStream.getExcludeList().getDatanodes().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getContainerIds().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty());
+ assertTrue(keyOutputStream.getExcludeList().getDatanodes().isEmpty());
key.write(data.getBytes(UTF_8));
// The close will just write to the buffer
key.close();
@@ -217,10 +216,10 @@ public class TestFailureHandlingByClientFlushDelay {
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
// Make sure a new block is written
- Assertions.assertNotEquals(
+ assertNotEquals(
keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0)
.getBlockID(), blockId);
- Assertions.assertEquals(3 * data.getBytes(UTF_8).length,
keyInfo.getDataSize());
+ assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8));
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
index 8b39e994b0..0b829814b0 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java
@@ -38,13 +38,17 @@ import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.List;
import java.util.UUID;
import java.util.HashMap;
@@ -143,18 +147,18 @@ public class TestHybridPipelineOnDatanode {
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(pipelineID1);
List<DatanodeDetails> dns = pipeline1.getNodes();
- Assertions.assertEquals(1, dns.size());
+ assertEquals(1, dns.size());
Pipeline pipeline2 =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(pipelineID2);
- Assertions.assertNotEquals(pipeline1, pipeline2);
- Assertions.assertSame(pipeline1.getType(),
+ assertNotEquals(pipeline1, pipeline2);
+ assertSame(pipeline1.getType(),
HddsProtos.ReplicationType.RATIS);
- Assertions.assertSame(pipeline1.getType(), pipeline2.getType());
+ assertSame(pipeline1.getType(), pipeline2.getType());
// assert that the pipeline Id1 and pipelineId2 are on the same node
// but different replication factor
- Assertions.assertTrue(pipeline2.getNodes().contains(dns.get(0)));
+ assertTrue(pipeline2.getNodes().contains(dns.get(0)));
byte[] b1 = new byte[data.length];
byte[] b2 = new byte[data.length];
// now try to read both the keys
@@ -166,8 +170,8 @@ public class TestHybridPipelineOnDatanode {
is = bucket.readKey(keyName2);
is.read(b2);
is.close();
- Assertions.assertArrayEquals(b1, data);
- Assertions.assertArrayEquals(b1, b2);
+ assertArrayEquals(b1, data);
+ assertArrayEquals(b1, b2);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index 8c8b0a269a..403ece9a0c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -56,6 +55,8 @@ import java.util.concurrent.TimeUnit;
import static java.nio.charset.StandardCharsets.UTF_8;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Tests MultiBlock Writes with Dn failures by Ozone Client.
@@ -146,12 +147,12 @@ public class TestMultiBlockWritesWithDnFailures {
key.write(data.getBytes(UTF_8));
// get the name of a valid container
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream groupOutputStream =
(KeyOutputStream) key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assertions.assertEquals(2, locationInfoList.size());
+ assertEquals(2, locationInfoList.size());
long containerId = locationInfoList.get(1).getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
.getContainerManager()
@@ -175,7 +176,7 @@ public class TestMultiBlockWritesWithDnFailures {
.setKeyName(keyName)
.build();
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assertions.assertEquals(2 * data.getBytes(UTF_8).length,
keyInfo.getDataSize());
+ assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName, data.concat(data).getBytes(UTF_8));
}
@@ -191,14 +192,14 @@ public class TestMultiBlockWritesWithDnFailures {
key.write(data.getBytes(UTF_8));
// get the name of a valid container
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream =
(KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> streamEntryList =
keyOutputStream.getStreamEntries();
// Assert that 6 block will be preallocated
- Assertions.assertEquals(6, streamEntryList.size());
+ assertEquals(6, streamEntryList.size());
key.write(data.getBytes(UTF_8));
key.flush();
long containerId = streamEntryList.get(0).getBlockID().getContainerID();
@@ -227,7 +228,7 @@ public class TestMultiBlockWritesWithDnFailures {
.setKeyName(keyName)
.build();
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
- Assertions.assertEquals(4 * data.getBytes(UTF_8).length,
keyInfo.getDataSize());
+ assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize());
validateData(keyName,
data.concat(data).concat(data).concat(data).getBytes(UTF_8));
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index a0fd275719..8bb723b7dd 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -85,6 +85,8 @@ import static
org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
import org.apache.ozone.test.tag.Flaky;
import org.junit.jupiter.api.AfterAll;
@@ -92,7 +94,6 @@ import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
-import org.mockito.Mockito;
class TestOzoneAtRestEncryption {
@@ -675,7 +676,7 @@ class TestOzoneAtRestEncryption {
@Test
void testGetKeyProvider() throws Exception {
KeyProvider kp1 = store.getKeyProvider();
- KeyProvider kpSpy = Mockito.spy(kp1);
+ KeyProvider kpSpy = spy(kp1);
assertNotEquals(kpSpy, kp1);
Cache<URI, KeyProvider> cacheSpy =
((RpcClient)store.getClientProxy()).getKeyProviderCache();
@@ -685,7 +686,7 @@ class TestOzoneAtRestEncryption {
// Verify the spied key provider is closed upon ozone client close
ozClient.close();
- Mockito.verify(kpSpy).close();
+ verify(kpSpy).close();
KeyProvider kp3 = ozClient.getObjectStore().getKeyProvider();
assertNotEquals(kp3, kpSpy);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 5ad49a955c..4aee60b60b 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -61,7 +61,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -83,6 +82,10 @@ import static
org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -178,7 +181,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
// generate a new uploadID.
String uploadIDNew = initiateMultipartUpload(bucket, keyName,
ReplicationType.RATIS, ONE);
- Assertions.assertNotEquals(uploadIDNew, uploadID);
+ assertNotEquals(uploadIDNew, uploadID);
}
@Test
@@ -186,17 +189,17 @@ public class TestOzoneClientMultipartUploadWithFSO {
IOException {
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName);
- Assertions.assertNotNull(multipartInfo);
+ assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
- Assertions.assertNotNull(multipartInfo.getUploadID());
+ assertNotNull(multipartInfo.getUploadID());
// Call initiate multipart upload for the same key again, this should
// generate a new uploadID.
multipartInfo = bucket.initiateMultipartUpload(keyName);
- Assertions.assertNotNull(multipartInfo);
- Assertions.assertNotEquals(multipartInfo.getUploadID(), uploadID);
- Assertions.assertNotNull(multipartInfo.getUploadID());
+ assertNotNull(multipartInfo);
+ assertNotEquals(multipartInfo.getUploadID(), uploadID);
+ assertNotNull(multipartInfo.getUploadID());
}
@Test
@@ -213,8 +216,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream
.getCommitUploadPartInfo();
- Assertions.assertNotNull(commitUploadPartInfo);
- Assertions.assertNotNull(commitUploadPartInfo.getPartName());
+ assertNotNull(commitUploadPartInfo);
+ assertNotNull(commitUploadPartInfo.getPartName());
}
@Test
@@ -239,12 +242,12 @@ public class TestOzoneClientMultipartUploadWithFSO {
// So, when a part is override partNames will still be same irrespective
// of content in ozone s3. This will make S3 Mpu completeMPU pass when
// comparing part names and large file uploads work using aws cp.
- Assertions.assertEquals(partName, partNameNew, "Part names should be
same");
+ assertEquals(partName, partNameNew, "Part names should be same");
// old part bytes written needs discard and have only
// new part bytes in quota for this bucket
long byteWritten = "name".length() * 3; // data written with replication
- Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
+ assertEquals(volume.getBucket(bucketName).getUsedBytes(),
byteWritten);
}
@@ -269,7 +272,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
long replicatedSize = QuotaUtil.getReplicatedSize(data.length,
bucket.getReplicationConfig());
- Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
+ assertEquals(volume.getBucket(bucketName).getUsedBytes(),
replicatedSize);
//upload same key again
@@ -284,7 +287,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
bucket.completeMultipartUpload(keyName, uploadID, partsMap);
// used sized should remain same, overwrite previous upload
- Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
+ assertEquals(volume.getBucket(bucketName).getUsedBytes(),
replicatedSize);
}
@@ -304,13 +307,13 @@ public class TestOzoneClientMultipartUploadWithFSO {
long replicatedSize = QuotaUtil.getReplicatedSize(data.length,
bucket.getReplicationConfig());
- Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(),
+ assertEquals(volume.getBucket(bucketName).getUsedBytes(),
replicatedSize);
bucket.abortMultipartUpload(keyName, uploadID);
// used size should become zero after abort upload
- Assertions.assertEquals(volume.getBucket(bucketName).getUsedBytes(), 0);
+ assertEquals(volume.getBucket(bucketName).getUsedBytes(), 0);
}
private OzoneBucket getOzoneECBucket(String myBucket)
@@ -374,7 +377,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
// the unused part size should be discarded from the bucket size,
// 30000000 - 10000000 = 20000000
long bucketSize = volume.getBucket(bucketName).getUsedBytes();
- Assertions.assertEquals(bucketSize, data.length * 2);
+ assertEquals(bucketSize, data.length * 2);
}
@Test
@@ -449,7 +452,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
keyName = parentDir + UUID.randomUUID();
String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE);
- Assertions.assertEquals(volume.getBucket(bucketName).getUsedNamespace(),
4);
+ assertEquals(volume.getBucket(bucketName).getUsedNamespace(), 4);
// upload part 1.
byte[] data = generateData(5 * 1024 * 1024,
@@ -472,9 +475,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo =
bucket.completeMultipartUpload(keyName,
uploadID, partsMap);
- Assertions.assertNotNull(omMultipartUploadCompleteInfo);
+ assertNotNull(omMultipartUploadCompleteInfo);
- Assertions.assertNotNull(omMultipartCommitUploadPartInfo);
+ assertNotNull(omMultipartCommitUploadPartInfo);
byte[] fileContent = new byte[data.length];
try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
@@ -485,14 +488,14 @@ public class TestOzoneClientMultipartUploadWithFSO {
// Combine all parts data, and check is it matching with get key data.
String part1 = new String(data, UTF_8);
sb.append(part1);
- Assertions.assertEquals(sb.toString(), new String(fileContent, UTF_8));
+ assertEquals(sb.toString(), new String(fileContent, UTF_8));
try {
ozoneOutputStream.close();
- Assertions.fail("testCommitPartAfterCompleteUpload failed");
+ fail("testCommitPartAfterCompleteUpload failed");
} catch (IOException ex) {
- Assertions.assertTrue(ex instanceof OMException);
- Assertions.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
+ assertTrue(ex instanceof OMException);
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
((OMException) ex).getResult());
}
}
@@ -570,8 +573,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
metadataMgr.getOpenKeyTable(bucketLayout).get(multipartOpenKey);
OmMultipartKeyInfo omMultipartKeyInfo =
metadataMgr.getMultipartInfoTable().get(multipartKey);
- Assertions.assertNull(omKeyInfo);
- Assertions.assertNull(omMultipartKeyInfo);
+ assertNull(omKeyInfo);
+ assertNull(omMultipartKeyInfo);
// Since deleteTable operation is performed via
// batchOp - Table.putWithBatch(), which is an async operation and
@@ -601,17 +604,17 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 3);
- Assertions.assertEquals(
+ assertEquals(
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
ozoneMultipartUploadPartListParts.getReplicationConfig());
- Assertions.assertEquals(3,
+ assertEquals(3,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
verifyPartNamesInDB(partsMap,
ozoneMultipartUploadPartListParts, uploadID);
- Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+ assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
}
private void verifyPartNamesInDB(Map<Integer, String> partsMap,
@@ -637,7 +640,7 @@ public class TestOzoneClientMultipartUploadWithFSO {
keyName, uploadID);
OmMultipartKeyInfo omMultipartKeyInfo =
metadataMgr.getMultipartInfoTable().get(multipartKey);
- Assertions.assertNotNull(omMultipartKeyInfo);
+ assertNotNull(omMultipartKeyInfo);
for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo :
omMultipartKeyInfo.getPartKeyInfoMap()) {
@@ -648,21 +651,21 @@ public class TestOzoneClientMultipartUploadWithFSO {
metadataMgr.getOzoneKey(volumeName, bucketName, keyName);
// partKeyName format in DB - partKeyName + ClientID
- Assertions.assertTrue(partKeyName.startsWith(fullKeyPartName),
+ assertTrue(partKeyName.startsWith(fullKeyPartName),
"Invalid partKeyName format in DB: " + partKeyName
+ ", expected name:" + fullKeyPartName);
listPartNames.remove(partKeyName);
}
- Assertions.assertTrue(listPartNames.isEmpty(),
+ assertTrue(listPartNames.isEmpty(),
"Wrong partKeyName format in DB!");
}
private String verifyPartNames(Map<Integer, String> partsMap, int index,
OzoneMultipartUploadPartListParts
ozoneMultipartUploadPartListParts) {
- Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(index).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(index)
.getPartName());
@@ -692,37 +695,37 @@ public class TestOzoneClientMultipartUploadWithFSO {
OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts =
bucket.listParts(keyName, uploadID, 0, 2);
- Assertions.assertEquals(
+ assertEquals(
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
ozoneMultipartUploadPartListParts.getReplicationConfig());
- Assertions.assertEquals(2,
+ assertEquals(2,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(0).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
.getPartName());
- Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(1).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(1)
.getPartName());
// Get remaining
- Assertions.assertTrue(ozoneMultipartUploadPartListParts.isTruncated());
+ assertTrue(ozoneMultipartUploadPartListParts.isTruncated());
ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID,
ozoneMultipartUploadPartListParts.getNextPartNumberMarker(), 2);
- Assertions.assertEquals(1,
+ assertEquals(1,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assertions.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
+ assertEquals(partsMap.get(ozoneMultipartUploadPartListParts
.getPartInfoList().get(0).getPartNumber()),
ozoneMultipartUploadPartListParts.getPartInfoList().get(0)
.getPartName());
// As we don't have any parts for this, we should get false here
- Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+ assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
}
@@ -750,15 +753,14 @@ public class TestOzoneClientMultipartUploadWithFSO {
// Should return empty
- Assertions.assertEquals(0,
- ozoneMultipartUploadPartListParts.getPartInfoList().size());
- Assertions.assertEquals(
+ assertEquals(0,
ozoneMultipartUploadPartListParts.getPartInfoList().size());
+ assertEquals(
RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
ozoneMultipartUploadPartListParts.getReplicationConfig());
// As we don't have any parts with greater than partNumberMarker and list
// is not truncated, so it should return false here.
- Assertions.assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
+ assertFalse(ozoneMultipartUploadPartListParts.isTruncated());
}
@@ -797,49 +799,49 @@ public class TestOzoneClientMultipartUploadWithFSO {
uploadPart(bucket, key3, uploadID3, 1, "data".getBytes(UTF_8));
OzoneMultipartUploadList listMPUs = bucket.listMultipartUploads("dir1");
- Assertions.assertEquals(3, listMPUs.getUploads().size());
+ assertEquals(3, listMPUs.getUploads().size());
List<String> expectedList = new ArrayList<>(keys);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assertions.assertEquals(0, expectedList.size());
+ assertEquals(0, expectedList.size());
listMPUs = bucket.listMultipartUploads("dir1/dir2");
- Assertions.assertEquals(2, listMPUs.getUploads().size());
+ assertEquals(2, listMPUs.getUploads().size());
expectedList = new ArrayList<>();
expectedList.add(key2);
expectedList.add(key3);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assertions.assertEquals(0, expectedList.size());
+ assertEquals(0, expectedList.size());
listMPUs = bucket.listMultipartUploads("dir1/dir2/dir3");
- Assertions.assertEquals(1, listMPUs.getUploads().size());
+ assertEquals(1, listMPUs.getUploads().size());
expectedList = new ArrayList<>();
expectedList.add(key3);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assertions.assertEquals(0, expectedList.size());
+ assertEquals(0, expectedList.size());
// partial key
listMPUs = bucket.listMultipartUploads("d");
- Assertions.assertEquals(3, listMPUs.getUploads().size());
+ assertEquals(3, listMPUs.getUploads().size());
expectedList = new ArrayList<>(keys);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assertions.assertEquals(0, expectedList.size());
+ assertEquals(0, expectedList.size());
// partial key
listMPUs = bucket.listMultipartUploads("");
- Assertions.assertEquals(3, listMPUs.getUploads().size());
+ assertEquals(3, listMPUs.getUploads().size());
expectedList = new ArrayList<>(keys);
for (OzoneMultipartUpload mpu : listMPUs.getUploads()) {
expectedList.remove(mpu.getKeyName());
}
- Assertions.assertEquals(0, expectedList.size());
+ assertEquals(0, expectedList.size());
}
private String verifyUploadedPart(String uploadID, String partName,
@@ -861,21 +863,20 @@ public class TestOzoneClientMultipartUploadWithFSO {
OmMultipartKeyInfo omMultipartKeyInfo =
metadataMgr.getMultipartInfoTable().get(multipartKey);
- Assertions.assertNotNull(omKeyInfo);
- Assertions.assertNotNull(omMultipartKeyInfo);
- Assertions.assertEquals(OzoneFSUtils.getFileName(keyName),
- omKeyInfo.getKeyName());
- Assertions.assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
+ assertNotNull(omKeyInfo);
+ assertNotNull(omMultipartKeyInfo);
+ assertEquals(OzoneFSUtils.getFileName(keyName), omKeyInfo.getKeyName());
+ assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo :
omMultipartKeyInfo.getPartKeyInfoMap()) {
OmKeyInfo currentKeyPartInfo =
OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo());
- Assertions.assertEquals(keyName, currentKeyPartInfo.getKeyName());
+ assertEquals(keyName, currentKeyPartInfo.getKeyName());
// verify dbPartName
- Assertions.assertEquals(partName, partKeyInfo.getPartName());
+ assertEquals(partName, partKeyInfo.getPartName());
}
return multipartKey;
}
@@ -912,9 +913,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName,
replicationType, replicationFactor);
- Assertions.assertNotNull(multipartInfo);
+ assertNotNull(multipartInfo);
String uploadID = multipartInfo.getUploadID();
- Assertions.assertNotNull(multipartInfo.getUploadID());
+ assertNotNull(multipartInfo.getUploadID());
return uploadID;
}
@@ -931,8 +932,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
ozoneOutputStream.getCommitUploadPartInfo();
- Assertions.assertNotNull(omMultipartCommitUploadPartInfo);
- Assertions.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
+ assertNotNull(omMultipartCommitUploadPartInfo);
+ assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
return omMultipartCommitUploadPartInfo.getPartName();
}
@@ -942,8 +943,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = oBucket
.completeMultipartUpload(kName, uploadID, partsMap);
- Assertions.assertNotNull(omMultipartUploadCompleteInfo);
- Assertions.assertNotNull(omMultipartUploadCompleteInfo.getHash());
+ assertNotNull(omMultipartUploadCompleteInfo);
+ assertNotNull(omMultipartUploadCompleteInfo.getHash());
}
private byte[] generateData(int size, byte val) {
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index f1d18f4629..49a3db9187 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -46,9 +46,10 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.TestHelper;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -143,12 +144,12 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay {
byte[] data1 =
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
.getBytes(UTF_8);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
long containerID =
keyOutputStream.getStreamEntries().get(0).
getBlockID().getContainerID();
- Assertions.assertEquals(1, keyOutputStream.getStreamEntries().size());
+ assertEquals(1, keyOutputStream.getStreamEntries().size());
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
.getContainer(ContainerID.valueOf(containerID));
@@ -163,17 +164,17 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay {
key.write(data1);
OutputStream stream = keyOutputStream.getStreamEntries().get(0)
.getOutputStream();
- Assertions.assertTrue(stream instanceof BlockOutputStream);
+ assertTrue(stream instanceof BlockOutputStream);
BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
TestHelper.waitForPipelineClose(key, cluster, false);
key.flush();
- Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream
+ assertTrue(HddsClientUtils.checkForException(blockOutputStream
.getIoException()) instanceof GroupMismatchException);
- Assertions.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
+ assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
.contains(pipeline.getId()));
- Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
+ assertEquals(2, keyOutputStream.getStreamEntries().size());
key.close();
- Assertions.assertEquals(0, keyOutputStream.getStreamEntries().size());
+ assertEquals(0, keyOutputStream.getStreamEntries().size());
validateData(keyName, data1);
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 16f8ef1398..037965f580 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -50,9 +50,11 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.TestHelper;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -151,12 +153,12 @@ public class TestOzoneClientRetriesOnExceptions {
byte[] data1 =
ContainerTestHelper.getFixedLengthString(keyString, dataLength)
.getBytes(UTF_8);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
long containerID =
keyOutputStream.getStreamEntries().get(0).
getBlockID().getContainerID();
- Assertions.assertEquals(1, keyOutputStream.getStreamEntries().size());
+ assertEquals(1, keyOutputStream.getStreamEntries().size());
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
.getContainer(ContainerID.valueOf(containerID));
@@ -171,17 +173,17 @@ public class TestOzoneClientRetriesOnExceptions {
key.write(data1);
OutputStream stream = keyOutputStream.getStreamEntries().get(0)
.getOutputStream();
- Assertions.assertTrue(stream instanceof BlockOutputStream);
+ assertTrue(stream instanceof BlockOutputStream);
BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
TestHelper.waitForPipelineClose(key, cluster, false);
key.flush();
- Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream
+ assertTrue(HddsClientUtils.checkForException(blockOutputStream
.getIoException()) instanceof GroupMismatchException);
- Assertions.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
+ assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
.contains(pipeline.getId()));
- Assertions.assertEquals(2, keyOutputStream.getStreamEntries().size());
+ assertEquals(2, keyOutputStream.getStreamEntries().size());
key.close();
- Assertions.assertEquals(0, keyOutputStream.getStreamEntries().size());
+ assertEquals(0, keyOutputStream.getStreamEntries().size());
validateData(keyName, data1);
}
@@ -190,10 +192,10 @@ public class TestOzoneClientRetriesOnExceptions {
String keyName = getKeyName();
OzoneOutputStream key = createKey(
keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize);
- Assertions.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
+ assertTrue(key.getOutputStream() instanceof KeyOutputStream);
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
List<BlockOutputStreamEntry> entries = keyOutputStream.getStreamEntries();
- Assertions.assertEquals((MAX_RETRIES + 1),
+ assertEquals((MAX_RETRIES + 1),
keyOutputStream.getStreamEntries().size());
int dataLength = maxFlushSize + 50;
// write data more than 1 chunk
@@ -220,7 +222,7 @@ public class TestOzoneClientRetriesOnExceptions {
}
key.write(data1);
OutputStream stream = entries.get(0).getOutputStream();
- Assertions.assertTrue(stream instanceof BlockOutputStream);
+ assertTrue(stream instanceof BlockOutputStream);
BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
TestHelper.waitForContainerClose(key, cluster);
// Ensure that blocks for the key have been allocated to at least N+1
@@ -233,11 +235,11 @@ public class TestOzoneClientRetriesOnExceptions {
key.write(data1);
// ensure that write is flushed to dn
key.flush();
- Assertions.fail("Expected exception not thrown");
+ fail("Expected exception not thrown");
} catch (IOException ioe) {
- Assertions.assertTrue(HddsClientUtils.checkForException(blockOutputStream
+ assertTrue(HddsClientUtils.checkForException(blockOutputStream
.getIoException()) instanceof ContainerNotOpenException);
- Assertions.assertTrue(ioe.
+ assertTrue(ioe.
getMessage().contains(
"Retry request failed. " +
"retries get failed due to exceeded maximum " +
@@ -245,14 +247,14 @@ public class TestOzoneClientRetriesOnExceptions {
}
try {
key.flush();
- Assertions.fail("Expected exception not thrown");
+ fail("Expected exception not thrown");
} catch (IOException ioe) {
- Assertions.assertTrue(ioe.getMessage().contains("Stream is closed"));
+ assertTrue(ioe.getMessage().contains("Stream is closed"));
}
try {
key.close();
} catch (IOException ioe) {
- Assertions.fail("Expected should not be thrown");
+ fail("Expected should not be thrown");
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
index 2a5122341d..8b75d05c9c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.MethodOrderer;
@@ -193,7 +192,7 @@ public class TestOzoneRpcClientForAclAuditLog {
OzoneVolume retVolumeinfo = store.getVolume(volumeName);
verifyLog(OMAction.READ_VOLUME.name(), volumeName,
AuditEventStatus.SUCCESS.name());
- Assertions.assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName));
+ assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName));
OzoneObj volObj = new OzoneObjInfo.Builder()
.setVolumeName(volumeName)
@@ -205,7 +204,7 @@ public class TestOzoneRpcClientForAclAuditLog {
List<OzoneAcl> acls = store.getAcl(volObj);
verifyLog(OMAction.GET_ACL.name(), volumeName,
AuditEventStatus.SUCCESS.name());
- Assertions.assertTrue(acls.size() > 0);
+ assertTrue(acls.size() > 0);
//Testing addAcl
store.addAcl(volObj, USER_ACL);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
index 1dcc8adadd..1aea5b3708 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
@@ -60,10 +60,13 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
import org.apache.ratis.statemachine.impl.StatemachineImplTestUtil;
import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -163,7 +166,7 @@ public class TestValidateBCSIDOnRestart {
KeyOutputStream groupOutputStream = (KeyOutputStream)
key.getOutputStream();
List<OmKeyLocationInfo> locationInfoList =
groupOutputStream.getLocationInfoList();
- Assertions.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
cluster);
@@ -173,7 +176,7 @@ public class TestValidateBCSIDOnRestart {
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerData();
- Assertions.assertTrue(containerData instanceof KeyValueContainerData);
+ assertTrue(containerData instanceof KeyValueContainerData);
KeyValueContainerData keyValueContainerData =
(KeyValueContainerData) containerData;
key.close();
@@ -200,14 +203,12 @@ public class TestValidateBCSIDOnRestart {
stateMachine.buildMissingContainerSet(parentPath.toFile());
// Since the snapshot threshold is set to 1, since there are
// applyTransactions, we should see snapshots
- Assertions.assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
+ assertTrue(parentPath.getParent().toFile().listFiles().length > 0);
// make sure the missing containerSet is not empty
HddsDispatcher dispatcher = (HddsDispatcher)
ozoneContainer.getDispatcher();
- Assertions.assertFalse(dispatcher.getMissingContainerSet().isEmpty());
- Assertions
- .assertTrue(dispatcher.getMissingContainerSet()
- .contains(containerID));
+ assertFalse(dispatcher.getMissingContainerSet().isEmpty());
+ assertTrue(dispatcher.getMissingContainerSet().contains(containerID));
// write a new key
key = objectStore.getVolume(volumeName).getBucket(bucketName)
.createKey("ratis", 1024,
@@ -218,7 +219,7 @@ public class TestValidateBCSIDOnRestart {
key.flush();
groupOutputStream = (KeyOutputStream) key.getOutputStream();
locationInfoList = groupOutputStream.getLocationInfoList();
- Assertions.assertEquals(1, locationInfoList.size());
+ assertEquals(1, locationInfoList.size());
omKeyLocationInfo = locationInfoList.get(0);
key.close();
containerID = omKeyLocationInfo.getContainerID();
@@ -228,7 +229,7 @@ public class TestValidateBCSIDOnRestart {
.getContainer().getContainerSet()
.getContainer(omKeyLocationInfo.getContainerID())
.getContainerData();
- Assertions.assertTrue(containerData instanceof KeyValueContainerData);
+ assertTrue(containerData instanceof KeyValueContainerData);
keyValueContainerData = (KeyValueContainerData) containerData;
try (DBHandle db = BlockUtils.getDB(keyValueContainerData, conf)) {
@@ -243,7 +244,7 @@ public class TestValidateBCSIDOnRestart {
index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
cluster.restartHddsDatanode(dn.getDatanodeDetails(), true);
// Make sure the container is marked unhealthy
- Assertions.assertSame(cluster.getHddsDatanodes().get(index)
+ assertSame(cluster.getHddsDatanodes().get(index)
.getDatanodeStateMachine()
.getContainer().getContainerSet().getContainer(containerID)
.getContainerState(),
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]