ruanhang1993 commented on a change in pull request #17601:
URL: https://github.com/apache/flink/pull/17601#discussion_r786445231



##########
File path: 
flink-connectors/flink-connector-kafka/src/test/java/org/apache/flink/streaming/connectors/kafka/table/KafkaTableITCase.java
##########
@@ -821,6 +826,175 @@ public void testPerPartitionWatermarkWithIdleSource() throws Exception {
         deleteTestTopic(topic);
     }
 
+    @Test
+    public void testStartFromGroupOffsetsLatest() throws Exception {
+        testStartFromGroupOffsets("latest", Collections.emptyList());
+    }
+
+    @Test
+    public void testStartFromGroupOffsetsEarliest() throws Exception {
+        final List<String> expected =
+                Arrays.asList(
+                        "+I[0, 0]", "+I[0, 1]", "+I[0, 2]", "+I[1, 3]", "+I[1, 4]", "+I[1, 5]");
+        testStartFromGroupOffsets("earliest", expected);
+    }
+
+    @Test
+    public void testStartFromGroupOffsetsNone() {
+        try {
+            testStartFromGroupOffsetsWithException("none", Collections.emptyList());
+            fail("Expected a NoOffsetForPartitionException when 'auto.offset.reset' is 'none'.");
+        } catch (Exception e) {
+            Throwable rootCause = ExceptionUtils.getRootCause(e);
+            assertTrue(rootCause instanceof NoOffsetForPartitionException);
+        }
+    }
+
+    private void testStartFromGroupOffsets(String reset, List<String> expected)
+            throws ExecutionException, InterruptedException {
+        // we always use a different topic name for each parameterized test,
+        // in order to make sure the topic can be created.
+        final String tableName = "Table" + format + reset;
+        final String topic = "groupOffset_" + format + reset;
+        createTestTopic(topic, 4, 1);
+
+        // ---------- Produce an event time stream into Kafka -------------------
+        String groupId = format + reset;
+        String bootstraps = getBootstrapServers();
+        tEnv.getConfig()
+                .getConfiguration()
+                .set(TABLE_EXEC_SOURCE_IDLE_TIMEOUT, Duration.ofMillis(100));
+
+        final String createTableSql =
+                "CREATE TABLE %s (\n"
+                        + "  `partition_id` INT,\n"
+                        + "  `value` INT\n"
+                        + ") WITH (\n"
+                        + "  'connector' = 'kafka',\n"
+                        + "  'topic' = '%s',\n"
+                        + "  'properties.bootstrap.servers' = '%s',\n"
+                        + "  'properties.group.id' = '%s',\n"
+                        + "  'scan.startup.mode' = 'group-offsets',\n"
+                        + "  'properties.auto.offset.reset' = '%s',\n"
+                        + "  'format' = '%s'\n"
+                        + ")";
+        tEnv.executeSql(
+                String.format(
+                        createTableSql, tableName, topic, bootstraps, groupId, reset, format));
+
+        String initialValues =
+                "INSERT INTO "
+                        + tableName
+                        + "\n"
+                        + "VALUES\n"
+                        + " (0, 0),\n"
+                        + " (0, 1),\n"
+                        + " (0, 2),\n"
+                        + " (1, 3),\n"
+                        + " (1, 4),\n"
+                        + " (1, 5)\n";
+        tEnv.executeSql(initialValues).await();
+
+        // ---------- Consume stream from Kafka -------------------
+
+        env.setParallelism(1);
+        String sinkName = "mySink" + format + reset;
+        String createSink =
+                "CREATE TABLE "
+                        + sinkName
+                        + "(\n"
+                        + "  `partition_id` INT,\n"
+                        + "  `value` INT\n"
+                        + ") WITH (\n"
+                        + "  'connector' = 'values'\n"
+                        + ")";
+        tEnv.executeSql(createSink);
+
+        TableResult tableResult =
+                tEnv.executeSql("INSERT INTO " + sinkName + " SELECT * FROM " + tableName);
+
+        if (expected.size() == 0) {
+            KafkaTableTestUtils.waitingEmptyResults(sinkName, Duration.ofSeconds(5));

Review comment:
       Fixed. This code aims to make sure that we never receive any result in this test; I have reimplemented the check with `CommonTestUtils#waitUntil` and given it a more meaningful name.
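
       For context, a minimal sketch of what such an empty-result check could look like is shown below. This is not the exact code from this PR: the helper name verifySinkStaysEmpty, the assumed waitUntil(Supplier<Boolean>, Duration, String) shape of CommonTestUtils#waitUntil, and the use of TestValuesTableFactory#getResults to read the 'values' sink are illustrative assumptions and may not match the final implementation.

           import static org.junit.Assert.fail;

           import java.time.Duration;
           import java.util.concurrent.TimeoutException;

           import org.apache.flink.core.testutils.CommonTestUtils;
           import org.apache.flink.table.planner.factories.TestValuesTableFactory;

           final class EmptyResultCheckSketch {

               // Fails the test if the 'values' sink receives any row within the
               // timeout; hitting the timeout is the success case, because it
               // means no result ever arrived.
               static void verifySinkStaysEmpty(String sinkName, Duration timeout) throws Exception {
                   try {
                       // Assumed helper shape: polls the condition until it turns
                       // true or the timeout elapses, throwing TimeoutException on
                       // timeout. TestValuesTableFactory#getResults is assumed to
                       // expose the rows collected by the 'values' sink.
                       CommonTestUtils.waitUntil(
                               () -> !TestValuesTableFactory.getResults(sinkName).isEmpty(),
                               timeout,
                               "waiting for rows that must never arrive");
                       fail(
                               "Expected no results from sink "
                                       + sinkName
                                       + " but received: "
                                       + TestValuesTableFactory.getResults(sinkName));
                   } catch (TimeoutException expected) {
                       // Timing out means the sink stayed empty, which is the desired outcome.
                   }
               }
           }

       The inverted condition plus the expected TimeoutException carries the idea: the wait is supposed to time out, and any row that arrives before the deadline fails the test immediately.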




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@flink.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

