ferenc-csaky commented on code in PR #22789:
URL: https://github.com/apache/flink/pull/22789#discussion_r1543181369


##########
flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/GSFileSystemScenarioTest.java:
##########
@@ -148,31 +147,31 @@ public void simpleWriteTest() throws IOException {
         // there should be a single blob now, in the specified temporary 
bucket or, if no temporary
         // bucket
         // specified, in the final bucket
-        assertEquals(1, storage.blobs.size());
+        assertThat(storage.blobs.size()).isEqualTo(1);

Review Comment:
   nit: `.isEqualTo(1)` -> `.isOne()` shorthand.



##########
flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/GSFileSystemScenarioTest.java:
##########
@@ -253,23 +252,20 @@ public void compoundWriteTestWithRestore() throws 
IOException {
 
             // there should be exactly one blob after commit, with the 
expected contents.
             // all temporary blobs should be removed.
-            assertEquals(1, storage.blobs.size());
+            assertThat(storage.blobs.size()).isEqualTo(1);

Review Comment:
   nit: `.isEqualTo(1)` -> `.isOne()` shorthand.



##########
flink-filesystems/flink-hadoop-fs/src/test/java/org/apache/flink/runtime/fs/hdfs/AbstractHadoopFileSystemITTest.java:
##########
@@ -95,20 +90,20 @@ public void testSimpleFileWriteAndRead() throws Exception {
     }
 
     @Test
-    public void testDirectoryListing() throws Exception {
+    void testDirectoryListing() throws Exception {
         final Path directory = new Path(basePath, "testdir/");
 
         // directory must not yet exist
-        assertFalse(fs.exists(directory));
+        assertThat(fs.exists(directory)).isFalse();
 
         try {
             // create directory
-            assertTrue(fs.mkdirs(directory));
+            assertThat(fs.mkdirs(directory)).isTrue();
 
             checkEmptyDirectory(directory);
 
             // directory empty
-            assertEquals(0, fs.listStatus(directory).length);
+            assertThat(fs.listStatus(directory).length).isEqualTo(0);

Review Comment:
   nit: Use `.isZero()`.



##########
flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSRecoverableFsDataOutputStreamTest.java:
##########
@@ -133,57 +131,57 @@ public void before() {
         }
     }
 
-    @Test
+    @TestTemplate
     public void emptyStreamShouldHaveProperPositionAndComponentObjectCount() {
         if (empty) {
-            assertEquals(0, position);
-            assertEquals(0, componentObjectCount);
+            assertThat(position).isEqualTo(0);
+            assertThat(componentObjectCount).isEqualTo(0);

Review Comment:
   nit: Use `.isZero()`. Applies to every other zero-check in the file.



##########
flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/RecoverableMultiPartUploadImplTest.java:
##########
@@ -123,127 +116,82 @@ public void 
multiplePartAndObjectUploadsShouldBeReflectedInRecoverable() throws
 
         final S3Recoverable recoverable = uploadObject(thirdIncompletePart);
 
-        assertThat(
-                recoverable, isEqualTo(thirdIncompletePart, firstCompletePart, 
secondCompletePart));
+        assertThatIsEqualTo(
+                recoverable, thirdIncompletePart, firstCompletePart, 
secondCompletePart);
     }
 
     @Test
-    public void s3RecoverableReflectsTheLatestPartialObject() throws 
IOException {
+    void s3RecoverableReflectsTheLatestPartialObject() throws IOException {
         final byte[] incompletePartOne = bytesOf("AB");
         final byte[] incompletePartTwo = bytesOf("ABC");
 
         S3Recoverable recoverableOne = uploadObject(incompletePartOne);
         S3Recoverable recoverableTwo = uploadObject(incompletePartTwo);
 
-        assertThat(
-                recoverableTwo.incompleteObjectName(),
-                not(equalTo(recoverableOne.incompleteObjectName())));
+        assertThat(recoverableTwo.incompleteObjectName())
+                .isNotEqualTo(recoverableOne.incompleteObjectName());
     }
 
-    @Test(expected = IllegalStateException.class)
-    public void uploadingNonClosedFileAsCompleteShouldThroughException() 
throws IOException {
+    @Test
+    void uploadingNonClosedFileAsCompleteShouldThroughException() throws 
IOException {
         final byte[] incompletePart = bytesOf("!!!");
 
         final RefCountedBufferingFileStream incompletePartFile = 
writeContent(incompletePart);
 
-        multiPartUploadUnderTest.uploadPart(incompletePartFile);
+        assertThatThrownBy(() -> 
multiPartUploadUnderTest.uploadPart(incompletePartFile))
+                .isInstanceOf(IllegalStateException.class);
     }
 
-    // --------------------------------- Matchers 
---------------------------------
-
-    private static TypeSafeMatcher<StubMultiPartUploader> 
hasMultiPartUploadWithPart(
-            final int partNo, final byte[] content) {
-
-        final TestUploadPartResult expectedCompletePart =
+    private static void assertThatHasMultiPartUploadWithPart(

Review Comment:
   Both custom assert methods with for-loops can be simplified to something
like this:
   ```java
   private static void assertThatHasUploadedObject(StubMultiPartUploader 
actual, byte[] content) {
       TestPutObjectResult expectedIncompletePart =
               createPutObjectResult(TEST_OBJECT_NAME, content);
   
       
assertThat(actual.getIncompletePartsUploaded()).contains(expectedIncompletePart);
   }
   ```



##########
flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/GSFileSystemScenarioTest.java:
##########
@@ -148,31 +147,31 @@ public void simpleWriteTest() throws IOException {
         // there should be a single blob now, in the specified temporary 
bucket or, if no temporary
         // bucket
         // specified, in the final bucket
-        assertEquals(1, storage.blobs.size());
+        assertThat(storage.blobs.size()).isEqualTo(1);
         GSBlobIdentifier temporaryBlobIdentifier =
                 (GSBlobIdentifier) storage.blobs.keySet().toArray()[0];
         String expectedTemporaryBucket =
                 StringUtils.isNullOrWhitespaceOnly(temporaryBucketName)
                         ? blobIdentifier.bucketName
                         : temporaryBucketName;
-        assertEquals(expectedTemporaryBucket, 
temporaryBlobIdentifier.bucketName);
+        
assertThat(temporaryBlobIdentifier.bucketName).isEqualTo(expectedTemporaryBucket);
 
         // commit
         committer.commit();
 
         // there should be exactly one blob after commit, with the expected 
contents.
         // all temporary blobs should be removed.
-        assertEquals(1, storage.blobs.size());
+        assertThat(storage.blobs.size()).isEqualTo(1);

Review Comment:
   nit: `.isEqualTo(1)` -> `.isOne()` shorthand.



##########
flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/utils/ConfigUtilsHadoopTest.java:
##########
@@ -278,8 +280,8 @@ public void shouldProperlyCreateHadoopConfig() {
         Map<String, String> hadoopConfigMap = 
TestUtils.hadoopConfigToMap(hadoopConfig);
         MapDifference<String, String> difference =
                 Maps.difference(expectedHadoopConfigMap, hadoopConfigMap);
-        assertEquals(Collections.EMPTY_MAP, difference.entriesDiffering());
-        assertEquals(Collections.EMPTY_MAP, difference.entriesOnlyOnLeft());
-        assertEquals(Collections.EMPTY_MAP, difference.entriesOnlyOnRight());
+        
assertThat(difference.entriesDiffering()).isEqualTo(Collections.EMPTY_MAP);
+        
assertThat(difference.entriesOnlyOnLeft()).isEqualTo(Collections.EMPTY_MAP);
+        
assertThat(difference.entriesOnlyOnRight()).isEqualTo(Collections.EMPTY_MAP);

Review Comment:
   Use `.isEmpty()` instead of an equality check.



##########
flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSRecoverableFsDataOutputStreamTest.java:
##########
@@ -133,57 +131,57 @@ public void before() {
         }
     }
 
-    @Test
+    @TestTemplate
     public void emptyStreamShouldHaveProperPositionAndComponentObjectCount() {
         if (empty) {
-            assertEquals(0, position);
-            assertEquals(0, componentObjectCount);
+            assertThat(position).isEqualTo(0);
+            assertThat(componentObjectCount).isEqualTo(0);
         }
     }
 
-    @Test
+    @TestTemplate
     public void shouldConstructStream() throws IOException {
         if (empty) {
-            assertEquals(0, fsDataOutputStream.getPos());
+            assertThat(fsDataOutputStream.getPos()).isEqualTo(0);
 
         } else {
-            assertEquals(position, fsDataOutputStream.getPos());
+            assertThat(fsDataOutputStream.getPos()).isEqualTo(position);
         }
     }
 
-    @Test
+    @TestTemplate
     public void shouldReturnPosition() throws IOException {
-        assertEquals(position, fsDataOutputStream.getPos());
+        assertThat(fsDataOutputStream.getPos()).isEqualTo(position);
     }
 
     private void writeContent(ThrowingRunnable<IOException> write, byte[] 
expectedContent)
             throws IOException {
 
         // write the byte, confirm position change and existence of write 
channel
-        assertEquals(position, fsDataOutputStream.getPos());
+        assertThat(fsDataOutputStream.getPos()).isEqualTo(position);
         write.run();
-        assertEquals(position + expectedContent.length, 
fsDataOutputStream.getPos());
+        assertThat(fsDataOutputStream.getPos()).isEqualTo(position + 
expectedContent.length);
 
         // close and persist. there should be exactly zero blobs before and 
one after, with this
         // byte value in it
-        assertEquals(0, blobStorage.blobs.size());
+        assertThat(blobStorage.blobs.size()).isEqualTo(0);
         fsDataOutputStream.closeForCommit();
-        assertEquals(1, blobStorage.blobs.size());
+        assertThat(blobStorage.blobs.size()).isEqualTo(1);

Review Comment:
   nit: Use `.isOne()`.



##########
flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/S3RecoverableFsDataOutputStreamTest.java:
##########
@@ -240,28 +242,37 @@ public void discardingUnpersistedUploadedData() throws 
IOException {
         streamUnderTest.write(bytesOf(" world"));
         streamUnderTest.closeForCommit().commit();
 
-        assertThat(multipartUploadUnderTest, hasContent(bytesOf("hello 
world")));
+        assertThat(multipartUploadUnderTest.getPublishedContents())
+                .isEqualTo(bytesOf("hello world"));
     }
 
     @Test
-    public void commitEmptyStreamShouldBeSuccessful() throws IOException {
+    void commitEmptyStreamShouldBeSuccessful() throws IOException {
         streamUnderTest.closeForCommit().commit();
     }
 
-    @Test(expected = IOException.class)
-    public void closeForCommitOnClosedStreamShouldFail() throws IOException {
-        streamUnderTest.closeForCommit().commit();
+    @Test
+    void closeForCommitOnClosedStreamShouldFail() throws IOException {
         streamUnderTest.closeForCommit().commit();
+        Assertions.assertThatThrownBy(() -> 
streamUnderTest.closeForCommit().commit())
+                .isInstanceOf(IOException.class);
     }
 
-    @Test(expected = Exception.class)
-    public void testSync() throws IOException {
+    @Test
+    void testSync() throws IOException {
         streamUnderTest.write(bytesOf("hello"));
         streamUnderTest.write(bytesOf(" world"));
         streamUnderTest.sync();
-        assertThat(multipartUploadUnderTest, hasContent(bytesOf("hello 
world")));
-        
streamUnderTest.write(randomBuffer(RefCountedBufferingFileStream.BUFFER_SIZE + 
1));
-        assertThat(multipartUploadUnderTest, hasContent(bytesOf("hello 
world")));
+
+        assertThat(multipartUploadUnderTest.getPublishedContents())
+                .isEqualTo(bytesOf("hello world"));
+
+        Assertions.assertThatThrownBy(

Review Comment:
   nit: static import for `assertThatThrownBy`.



##########
flink-filesystems/flink-oss-fs-hadoop/src/test/java/org/apache/flink/fs/osshadoop/writer/OSSRecoverableSerializerTest.java:
##########
@@ -145,6 +101,17 @@ private static OSSRecoverable createOSSRecoverable(
         }
     }
 
+    private static void assertIsEqualTo(OSSRecoverable actual, OSSRecoverable 
expected) {
+        
Assertions.assertThat(actual.getObjectName()).isEqualTo(expected.getObjectName());
+        
Assertions.assertThat(actual.getUploadId()).isEqualTo(expected.getUploadId());
+        
Assertions.assertThat(actual.getNumBytesInParts()).isEqualTo(expected.getNumBytesInParts());
+        
Assertions.assertThat(actual.getLastPartObject()).isEqualTo(expected.getLastPartObject());
+        Assertions.assertThat(actual.getLastPartObjectLength())
+                .isEqualTo(expected.getLastPartObjectLength());
+        
Assertions.assertThat(actual.getPartETags().stream().map(PartETag::getETag).toArray())
+                
.isEqualTo(expected.getPartETags().stream().map(PartETag::getETag).toArray());

Review Comment:
   nit: static import for `assertThat`.



##########
flink-filesystems/flink-gs-fs-hadoop/src/test/java/org/apache/flink/fs/gs/writer/GSRecoverableWriterCommitterTest.java:
##########
@@ -132,28 +131,29 @@ public void after() throws IOException {
      *
      * @throws IOException On underlying failure
      */
-    @Test
+    @TestTemplate
     public void commitTest() throws IOException {
         GSRecoverableWriterCommitter committer = commitTestInternal();
         committer.commit();
 
         // there should be exactly one blob left, the final blob identifier. 
validate its contents.
-        assertEquals(1, blobStorage.blobs.size());
+        assertThat(blobStorage.blobs.size()).isEqualTo(1);

Review Comment:
   nit: Use `.isOne()`.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscribe@flink.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org

Reply via email to