This is an automated email from the ASF dual-hosted git repository.

djwang pushed a commit to branch merge-with-upstream
in repository https://gitbox.apache.org/repos/asf/cloudberry-pxf.git

commit fdf58f3fae1818163ab5dd7e06093b5df421f29e
Author: huluhuifeng <[email protected]>
AuthorDate: Mon Dec 22 18:37:32 2025 +0800

    Fix: Add load/performance/extension test groups and align env/test expectations
    
    - add bench_test and pxf_extension_test in run_tests.sh, plus matrix entries for bench and pxf_extension in CI
    - bump surefire heap to 4G to avoid OOM
    - update gpupgrade expected outputs to new PXF_HOME paths and JSON formatter error text
    - make ProtocolUtils/HiveBaseTest/JdbcHiveTest/OrcWriteTest/ParquetWriteTest more robust to env defaults (protocol, creds, hive JDBC URL); see the sketch below this list
    - keep MultiServerTest running under HDFS with a safe working directory fallback
    - set distribution key and INSERT pattern for performance test data load
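
    The env-default hardening boils down to a property-then-environment-then-default
    lookup. A minimal, self-contained Java sketch of that pattern, mirroring the
    ProtocolUtils changes in the diff below (the class name here is illustrative only):

        // Resolve a key from -D system properties first, then the environment,
        // then fall back to a supplied default, in that order.
        public class EnvOrPropertyLookup {
            static String resolve(String key, String defaultValue) {
                String value = System.getProperty(key);
                if (value == null) {
                    value = System.getenv(key);
                }
                return value != null ? value : defaultValue;
            }

            public static void main(String[] args) {
                // e.g. the automation protocol falls back to HDFS when neither
                // -DPROTOCOL nor the PROTOCOL environment variable is set
                System.out.println(resolve("PROTOCOL", "HDFS"));
            }
        }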
---
 .github/workflows/pxf-ci.yml                       |   2 +
 .gitignore                                         |   1 +
 automation/pom.xml                                 |   2 +-
 .../expected/query01.ans                           |  14 +--
 .../expected/query01.ans                           |  16 +--
 .../json/invalid_encoding/expected/query01.ans     |   2 +-
 .../jdbc/session_params/expected/query01.ans       |   2 +-
 .../pxf/automation/utils/system/ProtocolUtils.java |  19 ++-
 .../automation/features/general/FailOverTest.java  | 103 ++++++++++++++--
 .../pxf/automation/features/jdbc/JdbcHiveTest.java |   4 +-
 .../features/multiserver/MultiServerTest.java      |   7 +-
 .../pxf/automation/features/orc/OrcWriteTest.java  |   2 +-
 .../features/parquet/ParquetWriteTest.java         |   2 +-
 .../automation/performance/PerformanceTest.java    |   4 +-
 .../pxf-cbdb-dev/ubuntu/script/entrypoint.sh       |   5 +
 .../docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh | 130 ++++++++++++++++++---
 16 files changed, 261 insertions(+), 54 deletions(-)

diff --git a/.github/workflows/pxf-ci.yml b/.github/workflows/pxf-ci.yml
index 1922bce7..1385d430 100644
--- a/.github/workflows/pxf-ci.yml
+++ b/.github/workflows/pxf-ci.yml
@@ -119,6 +119,8 @@ jobs:
           - s3
           - features
           - gpdb
+          - load
+          - pxf_extension
     steps:
     - name: Free disk space
       run: |
diff --git a/.gitignore b/.gitignore
index d01277c2..fc048cf7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,3 +16,4 @@ server/tmp
 /.vscode/settings.json
 /automation/dataTempFolder/
 /cli/go/pkg/
+/automation/test_artifacts
diff --git a/automation/pom.xml b/automation/pom.xml
index a3ffa806..7400a25c 100644
--- a/automation/pom.xml
+++ b/automation/pom.xml
@@ -59,7 +59,7 @@
                 <version>2.15</version>
                 <configuration>
                     <testFailureIgnore>true</testFailureIgnore>
-                    <argLine>-Xmx2048m -XX:MaxPermSize=512m</argLine>
+                    <argLine>-Xmx4096m</argLine>
                     <forkCount>1</forkCount>
                     <reuseForks>false</reuseForks>
                 </configuration>
diff --git a/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans b/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
index 2d491ff5..b5720ece 100644
--- a/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
+++ b/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
@@ -40,11 +40,11 @@ FROM pg_catalog.pg_extension AS e
     INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid)
 WHERE d.deptype = 'e' AND e.extname = 'pxf'
 ORDER BY 1;
-       proname      |            prosrc            |   probin
---------------------+------------------------------+-------------
- pxf_read           | pxfprotocol_import           | $libdir/pxf
- pxf_validate       | pxfprotocol_validate_urls    | $libdir/pxf
- pxf_write          | pxfprotocol_export           | $libdir/pxf
- pxfwritable_export | gpdbwritableformatter_export | $libdir/pxf
- pxfwritable_import | gpdbwritableformatter_import | $libdir/pxf
+       proname      |            prosrc            |             probin
+--------------------+------------------------------+----------------------------------
+ pxf_read           | pxfprotocol_import           | $PXF_HOME/gpextable/pxf
+ pxf_validate       | pxfprotocol_validate_urls    | $PXF_HOME/gpextable/pxf
+ pxf_write          | pxfprotocol_export           | $PXF_HOME/gpextable/pxf
+ pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf
+ pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf
 (5 rows)
diff --git a/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans b/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
index 44a614a0..36314ef5 100644
--- a/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
+++ b/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
@@ -40,13 +40,13 @@ FROM pg_catalog.pg_extension AS e
     INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid)
 WHERE d.deptype = 'e' AND e.extname = 'pxf'
 ORDER BY 1;
-       proname       |            prosrc            |   probin
----------------------+------------------------------+-------------
- pxf_read            | pxfprotocol_import           | $libdir/pxf
- pxf_validate        | pxfprotocol_validate_urls    | $libdir/pxf
- pxf_write           | pxfprotocol_export           | $libdir/pxf
- pxfdelimited_import | pxfdelimited_import          | $libdir/pxf
- pxfwritable_export  | gpdbwritableformatter_export | $libdir/pxf
- pxfwritable_import  | gpdbwritableformatter_import | $libdir/pxf
+       proname       |            prosrc            |             probin
+---------------------+------------------------------+----------------------------------
+ pxf_read            | pxfprotocol_import           | $PXF_HOME/gpextable/pxf
+ pxf_validate        | pxfprotocol_validate_urls    | $PXF_HOME/gpextable/pxf
+ pxf_write           | pxfprotocol_export           | $PXF_HOME/gpextable/pxf
+ pxfdelimited_import | pxfdelimited_import          | $PXF_HOME/gpextable/pxf
+ pxfwritable_export  | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf
+ pxfwritable_import  | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf
 (6 rows)
 
diff --git a/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans b/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans
index af75fe8e..e5b7729b 100644
--- a/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans
+++ b/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans
@@ -7,4 +7,4 @@
 -- end_matchsubs
 
 INSERT INTO pxf_invalid_encoding_json_write SELECT * from gpdb_primitive_types;
-ERROR:  gpdbwritable formatter can only export UTF8 formatted data. Define the external table with ENCODING UTF8
+ERROR:  pxfwritable_export formatter can only export UTF8 formatted data. Define the external table with ENCODING UTF8
diff --git a/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans b/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans
index 47a6535a..95e84a15 100644
--- a/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans
+++ b/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans
@@ -5,7 +5,7 @@
 SELECT * FROM pxf_jdbc_read_view_no_params WHERE name='client_min_messages' OR name='default_statistics_target' ORDER BY name;
            name            | setting
 ---------------------------+---------
- client_min_messages       | error
+ client_min_messages       | notice
  default_statistics_target | 100
 (2 rows)
 
diff --git a/automation/src/main/java/org/greenplum/pxf/automation/utils/system/ProtocolUtils.java b/automation/src/main/java/org/greenplum/pxf/automation/utils/system/ProtocolUtils.java
index e92c55c8..36dd1ac0 100644
--- a/automation/src/main/java/org/greenplum/pxf/automation/utils/system/ProtocolUtils.java
+++ b/automation/src/main/java/org/greenplum/pxf/automation/utils/system/ProtocolUtils.java
@@ -14,7 +14,14 @@ public class ProtocolUtils {
 
         ProtocolEnum result;
         try {
-            result = ProtocolEnum.valueOf(System.getProperty(PROTOCOL_KEY, ProtocolEnum.HDFS.name()).toUpperCase());
+            String protocol = System.getProperty(PROTOCOL_KEY);
+            if (protocol == null) {
+                protocol = System.getenv(PROTOCOL_KEY);
+            }
+            if (protocol == null) {
+                protocol = ProtocolEnum.HDFS.name();
+            }
+            result = ProtocolEnum.valueOf(protocol.toUpperCase());
         } catch (Exception e) {
             result = ProtocolEnum.HDFS; // use HDFS as default mode
         }
@@ -23,15 +30,19 @@ public class ProtocolUtils {
     }
 
     public static String getSecret() {
-        return System.getProperty(AWS_SECRET_ACCESS_KEY);
+        String secret = System.getProperty(AWS_SECRET_ACCESS_KEY);
+        return secret != null ? secret : System.getenv(AWS_SECRET_ACCESS_KEY);
     }
 
     public static String getAccess() {
-        return System.getProperty(AWS_ACCESS_KEY_ID);
+        String access = System.getProperty(AWS_ACCESS_KEY_ID);
+        String result = access != null ? access : System.getenv(AWS_ACCESS_KEY_ID);
+        return result;
     }
 
     public static String getPxfTestKeepData() {
-        return System.getProperty(PXF_TEST_KEEP_DATA, "false");
+        String keepData = System.getProperty(PXF_TEST_KEEP_DATA);
+        return keepData != null ? keepData : System.getenv().getOrDefault(PXF_TEST_KEEP_DATA, "false");
     }
 
 
diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java
index 22417443..d3b7589e 100644
--- a/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java
+++ b/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java
@@ -7,6 +7,10 @@ import org.greenplum.pxf.automation.structures.tables.pxf.ReadableExternalTable;
 import org.testng.annotations.Test;
 
 import java.io.File;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.URL;
 
 /** Tests how failures are handled **/
 @FailsWithFDW
@@ -28,14 +32,7 @@ public class FailOverTest extends BaseFeature {
     @Override
     protected void afterClass() throws Exception {
         super.afterClass();
-        // We need to restore the service after it has been stopped
-        if (cluster != null) {
-            try {
-                cluster.start(PhdCluster.EnumClusterServices.pxf);
-            } catch (Exception e) {
-                // Ignore if service is already running
-            }
-        }
+        ensurePxfRunning();
     }
 
     /**
@@ -64,5 +61,95 @@ public class FailOverTest extends BaseFeature {
         gpdb.createTableAndVerify(pxfExternalTable);
 
         runSqlTest("features/general/outOfMemory");
+
+        // The test intentionally kills the PXF JVM; restart it for subsequent tests.
+        ensurePxfRunning();
+    }
+
+    private void ensurePxfRunning() throws Exception {
+        Integer port = parsePxfPort();
+        if (cluster == null || port == null) {
+            return;
+        }
+
+        String host = getPxfHttpHost();
+        if (waitForPxfHealthy(host, port, 5_000)) {
+            return;
+        }
+
+        // Wait for the OOM kill hook to fully stop the old process to avoid false positives
+        // from jps/Bootstrap checks while the JVM is shutting down.
+        waitForPortClosed(host, port, 60_000);
+
+        for (int attempt = 1; attempt <= 3; attempt++) {
+            cluster.restart(PhdCluster.EnumClusterServices.pxf);
+            if (waitForPxfHealthy(host, port, 120_000)) {
+                return;
+            }
+        }
+        throw new RuntimeException("Failed to restart PXF after OutOfMemory test");
+    }
+
+    private Integer parsePxfPort() {
+        if (pxfPort == null) {
+            return null;
+        }
+        try {
+            return Integer.parseInt(pxfPort);
+        } catch (NumberFormatException ignored) {
+            return null;
+        }
+    }
+
+    private String getPxfHttpHost() {
+        if (pxfHost == null || pxfHost.trim().isEmpty() || "0.0.0.0".equals(pxfHost.trim())) {
+            return "localhost";
+        }
+        return pxfHost.trim();
+    }
+
+    private void waitForPortClosed(String host, int port, long timeoutMs) throws InterruptedException {
+        long deadline = System.currentTimeMillis() + timeoutMs;
+        while (System.currentTimeMillis() < deadline) {
+            if (!isPortOpen(host, port, 500)) {
+                return;
+            }
+            Thread.sleep(500);
+        }
+    }
+
+    private boolean waitForPxfHealthy(String host, int port, long timeoutMs) throws InterruptedException {
+        long deadline = System.currentTimeMillis() + timeoutMs;
+        while (System.currentTimeMillis() < deadline) {
+            if (isActuatorHealthy(host, port)) {
+                return true;
+            }
+            Thread.sleep(1000);
+        }
+        return false;
+    }
+
+    private boolean isPortOpen(String host, int port, int timeoutMs) {
+        try (Socket socket = new Socket()) {
+            socket.connect(new InetSocketAddress(host, port), timeoutMs);
+            return true;
+        } catch (Exception ignored) {
+            return false;
+        }
+    }
+
+    private boolean isActuatorHealthy(String host, int port) {
+        try {
+            URL url = new URL(String.format("http://%s:%d/actuator/health", host, port));
+            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+            connection.setRequestMethod("GET");
+            connection.setConnectTimeout(2000);
+            connection.setReadTimeout(2000);
+            int code = connection.getResponseCode();
+            connection.disconnect();
+            return code >= 200 && code < 300;
+        } catch (Exception ignored) {
+            return false;
+        }
     }
 }
diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java
index a546ce01..37b9d32e 100644
--- a/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java
+++ b/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java
@@ -166,7 +166,7 @@ public class JdbcHiveTest extends BaseFeature {
     }
 
     protected void createTables(Hive hive, String serverName, String gpdbTypesTableName, String gpdbQueryTableName) throws Exception {
-        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + ":10000/default;auth=noSasl";
+        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + ":10000/default";
         String user = null;
 
         // On kerberized cluster, enabled then we need the hive/hiveserver2_hostname principal in the connection string.
@@ -219,7 +219,7 @@ public class JdbcHiveTest extends BaseFeature {
             hiveReadable = TableFactory.getPxfJdbcReadableTable(
                     hiveReadableName, GPDB_WRITE_TYPES_TABLE_FIELDS, targetHiveTable.getFullName(), serverName);
         } else {
-            String jdbcUrl = String.format("%s%s:10000/default;auth=noSasl", HIVE_JDBC_URL_PREFIX, hive.getHost());
+            String jdbcUrl = String.format("%s%s:10000/default", HIVE_JDBC_URL_PREFIX, hive.getHost());
             // create GPDB external table for writing data from GPDB to Hive with JDBC profile
             hiveWritable = TableFactory.getPxfJdbcWritableTable(
                     hiveWritableName, GPDB_WRITE_TYPES_TABLE_FIELDS, targetHiveTable.getFullName(),
diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java
index 8f38be54..d51feb46 100755
--- a/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java
+++ b/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java
@@ -50,9 +50,6 @@ public class MultiServerTest extends BaseFeature {
      */
     @Override
     public void beforeClass() throws Exception {
-        if (ProtocolUtils.getProtocol() == ProtocolEnum.HDFS) {
-            return;
-        }
         // Initialize an additional HDFS system object (optional system object)
         hdfs2 = (Hdfs) systemManager.
                 getSystemObject("/sut", "hdfs2", -1, null, false, null, SutFactory.getInstance().getSutInstance());
@@ -71,6 +68,10 @@ public class MultiServerTest extends BaseFeature {
         }
 
         String hdfsWorkingDirectory = hdfs.getWorkingDirectory();
+        if (hdfsWorkingDirectory == null) {
+            // Fallback to the default automation working directory to avoid NPE when protocol is HDFS
+            hdfsWorkingDirectory = "/tmp/pxf_automation_data";
+        }
         defaultPath = hdfsWorkingDirectory + "/" + fileName;
 
         // Initialize server objects
diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java
index 560fa063..15a1a23b 100644
--- a/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java
+++ b/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java
@@ -207,7 +207,7 @@ public class OrcWriteTest extends BaseFeature {
         hive.runQuery(ctasHiveQuery);
 
         // use the Hive JDBC profile to avoid using the PXF ORC reader implementation
-        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + ":10000/default;auth=noSasl";
+        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + ":10000/default";
         ExternalTable exHiveJdbcTable = TableFactory.getPxfJdbcReadableTable(
                 gpdbTableNamePrefix + "_readable", ORC_PRIMITIVE_TABLE_COLUMNS_READ_FROM_HIVE,
                 hiveTable.getName() + "_ctas", HIVE_JDBC_DRIVER_CLASS, jdbcUrl, null);
diff --git a/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java b/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java
index 40a81174..3aa53bb8 100644
--- a/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java
+++ b/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java
@@ -369,7 +369,7 @@ public class ParquetWriteTest extends BaseWritableFeature {
         }
 
         // use the Hive JDBC profile to avoid using the PXF Parquet reader implementation
-        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + ":10000/default;auth=noSasl";
+        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + ":10000/default";
 
         ExternalTable exHiveJdbcTable = TableFactory.getPxfJdbcReadableTable(
                 readTableName, PARQUET_PRIMITIVE_ARRAYS_TABLE_COLUMNS_READ_FROM_HIVE,
diff --git a/automation/src/test/java/org/greenplum/pxf/automation/performance/PerformanceTest.java b/automation/src/test/java/org/greenplum/pxf/automation/performance/PerformanceTest.java
index 5024a03f..10b8e25f 100755
--- a/automation/src/test/java/org/greenplum/pxf/automation/performance/PerformanceTest.java
+++ b/automation/src/test/java/org/greenplum/pxf/automation/performance/PerformanceTest.java
@@ -170,9 +170,11 @@ public class PerformanceTest extends BaseFeature {
 
     private void prepareNativeGpdbData() throws Exception {
         gpdbNativeTable = new Table("perf_test", getColumnTypeGpdb());
+        gpdbNativeTable.setDistributionFields(new String[] { "int0" });
         gpdb.createTableAndVerify(gpdbNativeTable);
 
-        gpdb.insertData(gpdbTextHiveProfile, gpdbNativeTable);
+        gpdb.runQuery(String.format("INSERT INTO %s SELECT * FROM %s",
+                gpdbNativeTable.getName(), gpdbTextHiveProfile.getName()));
     }
 
     @Override
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh b/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
index e3a669bc..953e4903 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
@@ -389,10 +389,15 @@ start_hive_services() {
   export PATH="${JAVA_HOME}/bin:${HIVE_ROOT}/bin:${HADOOP_ROOT}/bin:${PATH}"
   export HIVE_HOME="${HIVE_ROOT}"
   export HADOOP_HOME="${HADOOP_ROOT}"
+  local tez_root="${TEZ_ROOT:-${GPHD_ROOT}/tez}"
   # bump HS2 heap to reduce Tez OOMs during tests
   export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
   export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -Xms512m ${HADOOP_CLIENT_OPTS:-}"
 
+  # ensure Tez libs are available on HDFS for hive.execution.engine=tez
+  "${HADOOP_ROOT}/bin/hadoop" fs -mkdir -p /apps/tez
+  "${HADOOP_ROOT}/bin/hadoop" fs -copyFromLocal -f "${tez_root}"/* /apps/tez
+
   # ensure clean state
   pkill -f HiveServer2 || true
   pkill -f HiveMetaStore || true
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh b/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
index 5d5903d7..6f7d48f1 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
@@ -521,9 +521,17 @@ feature_test(){
   ensure_gpupgrade_helpers
   ensure_testplugin_jar
 
+  # Make sure core services are alive before preparing configs
+  health_check_with_retry || true
+
   export PGHOST=127.0.0.1
   export PATH="${GPHOME}/bin:${PATH}"
   ensure_testuser_pg_hba
+  # Clean stale state from previous runs so feature suite starts fresh
+  cleanup_hdfs_test_data
+  hdfs dfs -rm -r -f /tmp/pxf_automation_data >/dev/null 2>&1 || true
+  cleanup_hive_state
+  cleanup_hbase_state
 
   # Prepare MinIO/S3 and restore default server to local HDFS/Hive/HBase
   ensure_minio_bucket
@@ -538,10 +546,6 @@ feature_test(){
   make GROUP="features" || true
   save_test_reports "features"
   echo "[run_tests] GROUP=features finished"
-
-  make GROUP="gpdb" || true
-  save_test_reports "gpdb"
-  echo "[run_tests] GROUP=gpdb finished"
 }
 
 gpdb_test() {
@@ -551,11 +555,87 @@ gpdb_test() {
   echo "[run_tests] GROUP=gpdb finished"
 }
 
+pxf_extension_test(){
+  local sudo_cmd=""
+  if [ "$(id -u)" -ne 0 ]; then
+    sudo_cmd="sudo -n"
+  fi
+  local extension_dir="${GPHOME}/share/postgresql/extension"
+  local pxf_fdw_control="${extension_dir}/pxf_fdw.control"
+  if [ -d "${REPO_ROOT}/fdw" ] && [ -d "${extension_dir}" ]; then
+    for sql in pxf_fdw--2.0.sql pxf_fdw--1.0--2.0.sql pxf_fdw--2.0--1.0.sql; do
+      if [ -f "${REPO_ROOT}/fdw/${sql}" ]; then
+        ${sudo_cmd} cp -f "${REPO_ROOT}/fdw/${sql}" "${extension_dir}/${sql}"
+      fi
+    done
+  fi
+
+  set_pxf_fdw_default_version() {
+    local version="$1"
+    if [ -f "${pxf_fdw_control}" ]; then
+      ${sudo_cmd} sed -i "s/^default_version = '.*'/default_version = '${version}'/" "${pxf_fdw_control}"
+    fi
+  }
+
+  set_pxf_fdw_default_version "2.0"
+  make GROUP="pxfExtensionVersion2" || true
+  save_test_reports "pxfExtensionVersion2"
+  make GROUP="pxfExtensionVersion2_1" || true
+  save_test_reports "pxfExtensionVersion2_1"
+
+  set_pxf_fdw_default_version "1.0"
+  make GROUP="pxfFdwExtensionVersion1" || true
+  save_test_reports "pxfFdwExtensionVersion1"
+
+  set_pxf_fdw_default_version "2.0"
+  make GROUP="pxfFdwExtensionVersion2" || true
+  save_test_reports "pxfFdwExtensionVersion2"
+}
+
+bench_prepare_env() {
+  export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-2048}
+  export JAVA_HOME="${JAVA_HADOOP}"
+  export PATH="${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${PATH}"
+
+  hdfs dfs -rm -r -f /tmp/pxf_automation_data /gpdb-ud-scratch/tmp/pxf_automation_data >/dev/null 2>&1 || true
+  for scratch in /tmp/pxf_automation_data /gpdb-ud-scratch/tmp/pxf_automation_data; do
+    hdfs dfs -mkdir -p "${scratch}" >/dev/null 2>&1 || true
+    hdfs dfs -chmod -R 775 "$(dirname "${scratch}")" >/dev/null 2>&1 || true
+  done
+  hdfs dfs -mkdir -p /tmp/hive >/dev/null 2>&1 || true
+  hdfs dfs -chmod -R 777 /tmp/hive >/dev/null 2>&1 || true
+
+  export PROTOCOL=
+  export PXF_TEST_KEEP_DATA=${PXF_TEST_KEEP_DATA:-true}
+
+  ensure_hive_ready
+}
+
+load_test(){
+  bench_prepare_env
+  make GROUP="load" || true
+  save_test_reports "load"
+  echo "[run_tests] GROUP=load finished"
+}
+
+performance_test(){
+  bench_prepare_env
+  make GROUP="performance" || true
+  save_test_reports "performance"
+  echo "[run_tests] GROUP=performance finished"
+}
+
+bench_test(){
+  load_test
+  performance_test
+}
+
 # Save test reports for a specific group to avoid overwriting
 save_test_reports() {
   local group="$1"
   local surefire_dir="${REPO_ROOT}/automation/target/surefire-reports"
   local logs_dir="${REPO_ROOT}/automation/automation_logs"
+  local pxf_logs_dir="${PXF_BASE:-/home/gpadmin/pxf-base}/logs"
   local artifacts_dir="${REPO_ROOT}/automation/test_artifacts"
   local group_dir="${artifacts_dir}/${group}"
 
@@ -574,6 +654,15 @@ save_test_reports() {
   else
     echo "[run_tests] No automation logs found for $group"
   fi
+
+  # Capture PXF service logs to aid debugging
+  if [ -d "$pxf_logs_dir" ] && [ "$(ls -A "$pxf_logs_dir" 2>/dev/null)" ]; then
+    echo "[run_tests] Saving PXF logs to $group_dir/pxf-logs"
+    mkdir -p "$group_dir/pxf-logs"
+    cp -r "$pxf_logs_dir"/* "$group_dir/pxf-logs/" 2>/dev/null || true
+  else
+    echo "[run_tests] No PXF logs found at $pxf_logs_dir"
+  fi
 }
 
 # Generate test summary from surefire reports
@@ -599,7 +688,7 @@ generate_test_summary() {
 
     local group=$(basename "$group_dir")
     # Skip if it's not a test group directory
-    [[ "$group" =~ 
^(smoke|hcatalog|hcfs|hdfs|hive|gpdb|sanity|hbase|profile|jdbc|proxy|unused|s3|features)$
 ]] || continue
+    [[ "$group" =~ 
^(smoke|hcatalog|hcfs|hdfs|hive|gpdb|sanity|hbase|profile|jdbc|proxy|unused|s3|features|load|performance|pxfExtensionVersion2|pxfExtensionVersion2_1|pxfFdwExtensionVersion1|pxfFdwExtensionVersion2)$
 ]] || continue
 
     echo "Processing $group test reports from $group_dir"
 
@@ -759,16 +848,22 @@ run_single_group() {
       make GROUP="s3"
       save_test_reports "s3"
       ;;
-    features|gpdb)
-      ensure_gpupgrade_helpers
-      ensure_testplugin_jar
-      ensure_minio_bucket
-      ensure_hadoop_s3a_config
-      configure_pxf_s3_server
-      configure_pxf_default_hdfs_server
-      export PROTOCOL=
-      make GROUP="$group"
-      save_test_reports "$group"
+    features)
+      feature_test
+      ;;
+    gpdb)
+      gpdb_test
+      ;;
+    pxf_extension)
+      pxf_extension_test
+      ;;
+    load)
+      bench_prepare_env
+      load_test
+      ;;
+    performance)
+      bench_prepare_env
+      performance_test
       ;;
     proxy)
       export PROTOCOL=
@@ -782,7 +877,7 @@ run_single_group() {
       ;;
     *)
       echo "Unknown test group: $group"
-      echo "Available groups: cli, external-table, server, sanity, smoke, 
hdfs, hcatalog, hcfs, hive, hbase, profile, jdbc, proxy, unused, s3, features, 
gpdb"
+      echo "Available groups: cli, external-table, server, sanity, smoke, 
hdfs, hcatalog, hcfs, hive, hbase, profile, jdbc, proxy, unused, s3, features, 
gpdb, load, performance, bench, pxf_extension"
       exit 1
       ;;
   esac
@@ -809,6 +904,9 @@ main() {
     # Run feature tests (includes features, gpdb)
     feature_test
 
+    # Run bench tests (includes load, performance)
+    bench_test
+
     echo "[run_tests] All test groups completed, generating summary..."
 
     # Generate test summary and return appropriate exit code

