This is an automated email from the ASF dual-hosted git repository.

chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git

commit 4c4643c3251c284260c96a2110f4b78c8a369723
Author: Chesnay Schepler <ches...@apache.org>
AuthorDate: Sun Feb 11 01:38:45 2024 +0100

    [FLINK-34422][test] BatchTestBase uses MiniClusterExtension
---
 .../batch/sql/CompactManagedTableITCase.java       |  6 +++--
 .../planner/runtime/utils/BatchTestBase.scala      | 28 ++++++++++++++--------
 2 files changed, 22 insertions(+), 12 deletions(-)
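The diff moves BatchTestBase's environment setup out of field initializers (which run at test-instance construction) into a @BeforeEach method, presumably so that it runs after the per-test setup of the MiniClusterExtension named in the subject. The registration of that extension is not part of this excerpt; purely as an illustrative sketch, a JUnit 5 test base usually wires it in via @RegisterExtension along these lines (the class name and cluster sizing below are invented for the example, not taken from BatchAbstractTestBase):

    import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
    import org.apache.flink.test.junit5.MiniClusterExtension;
    import org.junit.jupiter.api.extension.RegisterExtension;

    abstract class ExampleMiniClusterTestBase {

        // One shared mini cluster for the test class; task manager and slot
        // counts are illustrative defaults, not the values Flink's test bases use.
        @RegisterExtension
        static final MiniClusterExtension MINI_CLUSTER =
                new MiniClusterExtension(
                        new MiniClusterResourceConfiguration.Builder()
                                .setNumberTaskManagers(1)
                                .setNumberSlotsPerTaskManager(4)
                                .build());
    }

With the extension owning the cluster lifecycle, per-test state such as the table environment is created in @BeforeEach methods rather than in field initializers, which is what the changes below do.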

diff --git a/flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/batch/sql/CompactManagedTableITCase.java b/flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/batch/sql/CompactManagedTableITCase.java
index 4974b14feda..d4b3cbce27c 100644
--- a/flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/batch/sql/CompactManagedTableITCase.java
+++ b/flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/runtime/batch/sql/CompactManagedTableITCase.java
@@ -61,8 +61,7 @@ import static org.assertj.core.api.Assertions.fail;
 /** IT Case for testing managed table compaction. */
 class CompactManagedTableITCase extends BatchTestBase {
 
-    private final ObjectIdentifier tableIdentifier =
-            ObjectIdentifier.of(tEnv().getCurrentCatalog(), tEnv().getCurrentDatabase(), "MyTable");
+    private ObjectIdentifier tableIdentifier;
     private final Map<CatalogPartitionSpec, List<RowData>> collectedElements = new HashMap<>();
 
     private Path rootPath;
@@ -73,6 +72,9 @@ class CompactManagedTableITCase extends BatchTestBase {
     @BeforeEach
     public void before() throws Exception {
         super.before();
+        tableIdentifier =
+                ObjectIdentifier.of(
+                        tEnv().getCurrentCatalog(), tEnv().getCurrentDatabase(), "MyTable");
         MANAGED_TABLES.put(tableIdentifier, new AtomicReference<>());
         referenceOfManagedTableFileEntries = new AtomicReference<>();
         MANAGED_TABLE_FILE_ENTRIES.put(tableIdentifier, referenceOfManagedTableFileEntries);
diff --git a/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/utils/BatchTestBase.scala b/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/utils/BatchTestBase.scala
index fb5a9a058ca..cb509321f34 100644
--- a/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/utils/BatchTestBase.scala
+++ b/flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/utils/BatchTestBase.scala
@@ -57,15 +57,11 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach}
 class BatchTestBase extends BatchAbstractTestBase {
 
   protected var settings = EnvironmentSettings.newInstance().inBatchMode().build()
-  protected var testingTableEnv: TestingTableEnvironment = TestingTableEnvironment
-    .create(settings, catalogManager = None, TableConfig.getDefault)
-  protected var tEnv: TableEnvironment = testingTableEnv
-  tEnv.getConfig.set(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_ENABLED, Boolean.box(false))
-  protected var planner =
-    tEnv.asInstanceOf[TableEnvironmentImpl].getPlanner.asInstanceOf[PlannerBase]
-  protected var env: StreamExecutionEnvironment = planner.getExecEnv
-  env.getConfig.enableObjectReuse()
-  protected var tableConfig: TableConfig = tEnv.getConfig
+  protected var testingTableEnv: TestingTableEnvironment = _
+  protected var tEnv: TableEnvironment = _
+  protected var planner: PlannerBase = _
+  protected var env: StreamExecutionEnvironment = _
+  protected var tableConfig: TableConfig = _
 
   val LINE_COL_PATTERN: Pattern = Pattern.compile("At line ([0-9]+), column ([0-9]+)")
   val LINE_COL_TWICE_PATTERN: Pattern = Pattern.compile(
@@ -74,10 +70,22 @@ class BatchTestBase extends BatchAbstractTestBase {
 
   @throws(classOf[Exception])
   @BeforeEach
-  def before(): Unit = {
+  def setupEnv(): Unit = {
+    testingTableEnv = TestingTableEnvironment
+      .create(settings, catalogManager = None, TableConfig.getDefault)
+    tEnv = testingTableEnv
+    tEnv.getConfig.set(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_ENABLED, Boolean.box(false))
+    planner = tEnv.asInstanceOf[TableEnvironmentImpl].getPlanner.asInstanceOf[PlannerBase]
+    env = planner.getExecEnv
+    env.getConfig.enableObjectReuse()
+    tableConfig = tEnv.getConfig
     BatchTestBase.configForMiniCluster(tableConfig)
   }
 
+  @throws(classOf[Exception])
+  @BeforeEach
+  def before(): Unit = {}
+
   @AfterEach
   def after(): Unit = {
     TestValuesTableFactory.clearAllData()
