This is an automated email from the ASF dual-hosted git repository.

maxyang pushed a commit to branch merge-with-upstream
in repository https://gitbox.apache.org/repos/asf/cloudberry-pxf.git

commit b0de1f4c9e9043e127a9d4389fb87efdddd76382
Author: liuxiaoyu <[email protected]>
AuthorDate: Tue Dec 16 09:14:28 2025 +0800

    feat(ci): improve workflow automation and test reporting
    
    - Enhance CI workflow scripts and build process
    - Add detailed test reporting with statistics
    - Fix permission and disk space issues
    - Optimize Java memory and add debug logging
    - Update entrypoint for database initialization
---
 .github/workflows/pxf-ci.yml                       | 499 ++++++++++++++-------
 automation/Makefile                                |   4 +-
 automation/pom.xml                                 |   3 +
 .../expected/query01.ans                           |  14 +-
 .../expected/query01.ans                           |  16 +-
 .../expected/query04.ans                           |  10 +-
 .../json/invalid_encoding/expected/query01.ans     |   2 +-
 .../jdbc/session_params/expected/query01.ans       |   2 +-
 .../automation/features/cloud/CloudAccessTest.java |   4 +
 .../automation/features/cloud/S3SelectTest.java    |  24 +-
 .../automation/features/general/FailOverTest.java  |   6 +-
 .../features/gpupgrade/GpupgradeTest.java          |   8 +-
 .../pxf/automation/features/hive/HiveBaseTest.java |  20 +-
 .../pxf/automation/features/jdbc/JdbcHiveTest.java |   4 +-
 .../features/multiserver/MultiServerTest.java      |   4 +
 .../pxf/automation/features/orc/OrcWriteTest.java  |   2 +-
 .../features/parquet/ParquetWriteTest.java         |   2 +-
 .../writable/HdfsWritableSequenceTest.java         |  17 +-
 .../ubuntu/script/build_cloudberrry.sh             |   3 +-
 .../ubuntu/script/build_cloudberry_deb.sh          |  99 ++++
 .../docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh |   4 +
 .../pxf-cbdb-dev/ubuntu/script/entrypoint.sh       | 177 +++++++-
 .../docker/pxf-cbdb-dev/ubuntu/script/pxf-test.sh  | 196 ++++++++
 .../docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh | 188 +++++++-
 concourse/singlecluster/bin/gphd-env.sh            |   1 +
 concourse/singlecluster/conf/gphd-conf.sh          |  11 +-
 26 files changed, 1091 insertions(+), 229 deletions(-)

diff --git a/.github/workflows/pxf-ci.yml b/.github/workflows/pxf-ci.yml
index cedb10d6..782a3ba9 100644
--- a/.github/workflows/pxf-ci.yml
+++ b/.github/workflows/pxf-ci.yml
@@ -24,19 +24,89 @@ env:
   PXF_HOME: "/usr/local/pxf"
 
 jobs:
-  pxf-build-install-test:
-    name: Build, Install & Test PXF
+  build-cloudberry-deb:
+    name: Build Cloudberry DEB Package
     runs-on: ubuntu-latest
+    container:
+      image: apache/incubator-cloudberry:cbdb-build-ubuntu22.04-latest
+      options: --user root
     steps:
-    - name: Checkout Apache Cloudberry source
+    - name: Checkout Cloudberry source
       uses: actions/checkout@v4
       with:
         repository: apache/cloudberry
-        ref: main
-        fetch-depth: 1
-        persist-credentials: false
-        path: cloudberry
+        ref: ${{ env.CLOUDBERRY_VERSION }}
+        path: workspace/cloudberry
         submodules: true
+
+    - name: Checkout PXF source (for build script)
+      uses: actions/checkout@v4
+      with:
+        path: cloudberry-pxf
+
+    - name: Build Cloudberry DEB
+      run: |
+        export WORKSPACE=$PWD/workspace
+        export CLOUDBERRY_VERSION=99.0.0
+        export CLOUDBERRY_BUILD=1
+        bash 
cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberry_deb.sh
+
+    - name: Package Cloudberry source
+      run: |
+        cd workspace
+        tar czf cloudberry-source.tar.gz cloudberry/
+
+    - name: Upload DEB artifact
+      uses: actions/upload-artifact@v4
+      with:
+        name: cloudberry-deb
+        path: workspace/cloudberry-deb/*.deb
+        retention-days: 7
+
+    - name: Upload Cloudberry source artifact
+      uses: actions/upload-artifact@v4
+      with:
+        name: cloudberry-source
+        path: workspace/cloudberry-source.tar.gz
+        retention-days: 7
+
+  build-docker-images:
+    name: Build Docker Images
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout PXF source
+      uses: actions/checkout@v4
+      with:
+        path: cloudberry-pxf
+
+    - name: Build singlecluster image
+      run: |
+        cd ${{ github.workspace }}/cloudberry-pxf/concourse/singlecluster
+        docker build -t pxf/singlecluster:3 .
+        docker save pxf/singlecluster:3 > /tmp/singlecluster-image.tar
+
+    - name: Upload singlecluster image
+      uses: actions/upload-artifact@v4
+      with:
+        name: singlecluster-image
+        path: /tmp/singlecluster-image.tar
+        retention-days: 1
+
+  pxf-build-install-test:
+    name: Build, Install & Test PXF
+    needs: [build-cloudberry-deb, build-docker-images]
+    runs-on: ubuntu-latest
+    steps:
+    - name: Free disk space
+      run: |
+        sudo rm -rf /usr/share/dotnet
+        sudo rm -rf /opt/ghc
+        sudo rm -rf /usr/local/share/boost
+        sudo rm -rf /usr/local/lib/android
+        sudo rm -rf /opt/hostedtoolcache
+        sudo docker system prune -af
+        df -h
+
     - name: Checkout Apache Cloudberry pxf source
       uses: actions/checkout@v4
       with:
@@ -46,182 +116,287 @@ jobs:
         persist-credentials: false
         path: cloudberry-pxf
         submodules: true
-    - name: Cache singlecluster Docker image
-      id: cache-singlecluster
-      uses: actions/cache@v4
+
+    - name: Download Cloudberry DEB
+      uses: actions/download-artifact@v4
       with:
-        path: /tmp/singlecluster-image.tar
-        key: singlecluster-${{ 
hashFiles('cloudberry-pxf/concourse/singlecluster/Dockerfile', 
'cloudberry-pxf/concourse/singlecluster/**') }}
-    - name: Build singlecluster image
-      if: steps.cache-singlecluster.outputs.cache-hit != 'true'
-      run: |
-        cd ${{ github.workspace }}/cloudberry-pxf/concourse/singlecluster
-        docker build -t pxf/singlecluster:3 .
-        docker save pxf/singlecluster:3 > /tmp/singlecluster-image.tar
+        name: cloudberry-deb
+        path: /tmp
+    
+    - name: Download Cloudberry source
+      uses: actions/download-artifact@v4
+      with:
+        name: cloudberry-source
+        path: /tmp
+    
+    - name: Download singlecluster image
+      uses: actions/download-artifact@v4
+      with:
+        name: singlecluster-image
+        path: /tmp
+
     - name: Load singlecluster image
-      if: steps.cache-singlecluster.outputs.cache-hit == 'true'
       run: |
         docker load < /tmp/singlecluster-image.tar
-    - name: Run Test
+    
+    - name: Prepare Cloudberry source
+      run: |
+        cd ${{ github.workspace }}
+        tar xzf /tmp/cloudberry-source.tar.gz
+        chmod -R u+rwX,go+rX cloudberry
+    
+    - name: Build and Start Services
+      id: build_start
+      continue-on-error: true
+      run: |
+        cd ${{ github.workspace }}/cloudberry-pxf
+        docker compose -f 
concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml down -v
+        docker compose -f 
concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml build
+        docker compose -f 
concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml up -d
+        docker exec pxf-cbdb-dev sudo chown -R gpadmin:gpadmin 
/home/gpadmin/workspace/cloudberry
+        docker cp /tmp/*.deb pxf-cbdb-dev:/tmp/
+        docker exec pxf-cbdb-dev sudo chown gpadmin:gpadmin /tmp/*.deb
+        docker exec pxf-cbdb-dev bash -lc "cd 
/home/gpadmin/workspace/cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu && 
./script/entrypoint.sh"
+
+    - name: Test PXF CLI
+      id: test_cli
+      continue-on-error: true
+      if: always()
       run: |
-        cd ${{ github.workspace 
}}/cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu/
-        docker compose up -d
-        # Wait for container to be ready
-        sleep 10
-        # Execute entrypoint script with correct working directory
-        docker compose exec -T pxf-cbdb-dev /bin/bash -c "cd 
/home/gpadmin/workspace/cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu && 
./script/entrypoint.sh"
-    - name: Extract test artifacts from container
+        cd ${{ github.workspace }}/cloudberry-pxf
+        docker exec pxf-cbdb-dev bash -lc "cd 
/home/gpadmin/workspace/cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu && 
./script/pxf-test.sh cli"
+
+    - name: Test PXF Server
+      id: test_server
+      continue-on-error: true
       if: always()
       run: |
-        echo "Test results are already available in mounted volume:"
-        ls -la ${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/ || echo "No test_artifacts 
directory found"
-        
-        # Show summary if available
-        if [ -f ${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/summary.csv ]; then
-          echo "Test Summary:"
-          cat ${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/summary.csv
-        fi
-        
+        cd ${{ github.workspace }}/cloudberry-pxf
+        docker exec pxf-cbdb-dev bash -lc "cd 
/home/gpadmin/workspace/cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu && 
./script/pxf-test.sh server"
+
+    - name: Test PXF Automation
+      id: test_automation
+      continue-on-error: true
+      if: always()
+      run: |
+        cd ${{ github.workspace }}/cloudberry-pxf
+        docker exec pxf-cbdb-dev bash -lc "cd 
/home/gpadmin/workspace/cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu && 
./script/pxf-test.sh automation"
+    - name: Collect and upload artifacts
+      if: always()
+      run: |
+        mkdir -p artifacts/logs
+        # Always create a manifest to ensure non-empty artifact bundle
+        echo "PXF artifacts bundle" > artifacts/manifest.txt
+        # Collect test artifacts from mounted volume
+        cp -r ${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/* artifacts/ 2>/dev/null || true
+        # Collect PXF logs from container if available
+        docker exec pxf-cbdb-dev bash -c "cp -r /usr/local/pxf/logs/* 
/tmp/pxf-logs/ 2>/dev/null || true" || true
+        docker cp pxf-cbdb-dev:/tmp/pxf-logs artifacts/logs/ 2>/dev/null || 
true
+        # Record collected files into manifest
+        find artifacts -type f -print >> artifacts/manifest.txt 2>/dev/null || 
true
+      shell: bash
+
     - name: Cleanup containers
       if: always()
       run: |
-        cd cloudberry-pxf/concourse/docker/pxf-cbdb-dev/ubuntu/
-        docker compose down -v || true
+        cd ${{ github.workspace }}/cloudberry-pxf
+        docker compose -f 
concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml down -v || true
 
 
-    - name: Save artifacts
+    - name: Upload PXF artifacts
       if: always()
       uses: actions/upload-artifact@v4
       id: upload_automation_step
       with:
         name: automation-test-results-pxf-cbdb-dev
-        path: |
-          ${{ github.workspace }}/cloudberry-pxf/automation/test_artifacts/
+        path: artifacts/**
+        if-no-files-found: ignore
         retention-days: 30
 
     - name: Evaluate module build/test results
-      if: success() || failure()
-      uses: actions/github-script@v7
-      with:
-        script: |
-          const fs = require('fs');
-          const path = require('path');
-          
-          console.log('Processing test reports for PXF...');
-          
-          // Start building the step summary
-          const summary = core.summary
-            .addHeading('PXF Test Results Summary')
-            .addHeading('๐Ÿ“ฆ Artifacts', 3)
-            .addLink('Raw Test Results', "${{ 
steps.upload_automation_step.outputs.artifact-url }}");
-          
-          let hasErrors = false;
-          
-          // Check if test summary exists
-          const testSummaryPath = '${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/summary.csv';
-          if (fs.existsSync(testSummaryPath)) {
-            try {
-              const csvContent = fs.readFileSync(testSummaryPath, 'utf8');
-              const lines = csvContent.trim().split('\n');
-              
-              if (lines.length > 1) {
-                // Parse CSV and create table
-                const headers = lines[0].split(',');
-                const rows = lines.slice(1).map(line => line.split(','));
-                
-                // Add test results table
-                summary.addHeading('๐Ÿงช Test Results', 3);
-                summary.addTable([
-                  headers,
-                  ...rows
-                ]);
-                
-                // Check for failures
-                let totalTests = 0;
-                let failedTests = 0;
-                let passedTests = 0;
-                
-                rows.forEach(row => {
-                  totalTests++;
-                  if (row[1] === 'FAIL') {
-                    failedTests++;
-                    hasErrors = true;
-                  } else if (row[1] === 'PASS') {
-                    passedTests++;
-                  }
-                });
-                
-                summary.addRaw(`\n\n**Summary**: ${totalTests} test 
components, ${passedTests} passed, ${failedTests} failed\n\n`);
-                
-                if (failedTests > 0) {
-                  core.error(`${failedTests} test component(s) failed`);
-                }
-              }
-            } catch (error) {
-              console.log('Error processing test summary:', error.message);
-              core.error('Error processing test summary');
-              hasErrors = true;
-            }
-          } else {
-            summary.addRaw('No test summary found\n\n');
-          }
-          
-          // Check if TestNG results exist
-          const testReportsDir = '${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/surefire-reports';
-          if (fs.existsSync(testReportsDir)) {
-            const testngResultsPath = path.join(testReportsDir, 
'testng-results.xml');
-            if (fs.existsSync(testngResultsPath)) {
-              try {
-                const xmlContent = fs.readFileSync(testngResultsPath, 'utf8');
-              
-                // Extract test statistics using regex
-                const totalMatch = xmlContent.match(/total="(\d+)"/);
-                const passedMatch = xmlContent.match(/passed="(\d+)"/);
-                const failedMatch = xmlContent.match(/failed="(\d+)"/);
-                const skippedMatch = xmlContent.match(/skipped="(\d+)"/);
-              
-                const total = totalMatch ? totalMatch[1] : '0';
-                const passed = passedMatch ? passedMatch[1] : '0';
-                const failed = failedMatch ? failedMatch[1] : '0';
-                const skipped = skippedMatch ? skippedMatch[1] : '0';
-              
-                // Add TestNG statistics to summary
-                summary
-                  .addHeading('๐Ÿ”ฌ Automation Test Details', 3)
-                  .addTable([
-                    ['Metric', 'Count'],
-                    ['Total Tests', total],
-                    ['Passed', passed],
-                    ['Failed', failed],
-                    ['Skipped', skipped]
-                  ]);
-              
-                // Check if there are failed tests
-                const failedCount = parseInt(failed) || 0;
-                const skippedCount = parseInt(skipped) || 0;
+      if: always()
+      env:
+        BUILD_START: ${{ steps.build_start.outcome }}
+        TEST_CLI: ${{ steps.test_cli.outcome }}
+        TEST_FDW: ${{ steps.test_fdw.outcome }}
+        TEST_SERVER: ${{ steps.test_server.outcome }}
+        TEST_AUTOMATION: ${{ steps.test_automation.outcome }}
+      run: |
+        set -eo pipefail
+
+        status_icon() {
+          case "$1" in
+            success) echo "โœ…";;
+            failure) echo "โŒ";;
+            cancelled) echo "๐Ÿ›‘";;
+            skipped|"") echo "โญ๏ธ";;
+            *) echo "$1";;
+          esac
+        }
+
+        # Use files from Docker volume mapping (no need to copy from container)
+        echo "=== Checking for test results ==="
+        ls -la ${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/ 2>/dev/null || echo "No 
test_artifacts directory"
+
+        # Copy test results from mapped volume
+        if [ -f "${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/test_summary.json" ]; then
+          cp "${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/test_summary.json" 
./test_summary.json
+          echo "Found test_summary.json"
+        else
+          echo 
'{"overall":{"total":0,"passed":0,"failed":0,"skipped":0},"groups":{}}' > 
./test_summary.json
+          echo "No test_summary.json, created default"
+        fi
+
+        if [ -f "${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/component_results.csv" ]; then
+          cp "${{ github.workspace 
}}/cloudberry-pxf/automation/test_artifacts/component_results.csv" 
./component_results.csv
+        else
+          echo "Component,Status,ExitCode" > ./component_results.csv
+        fi
+
+        if [ -d "${{ github.workspace 
}}/cloudberry-pxf/automation/target/surefire-reports" ]; then
+          cp -r "${{ github.workspace 
}}/cloudberry-pxf/automation/target/surefire-reports" ./surefire-reports
+        else
+          mkdir -p ./surefire-reports
+        fi
+
+        echo "=== test_summary.json content ==="
+        if [ -f ./test_summary.json ]; then
+          cat ./test_summary.json
+        else
+          echo "test_summary.json not found"
+        fi
+        echo "=== end of test_summary.json ==="
+
+        BUILD_ICON=$(status_icon "${BUILD_START}")
+        CLI_ICON=$(status_icon "${TEST_CLI}")
+        FDW_ICON=$(status_icon "${TEST_FDW}")
+        SERVER_ICON=$(status_icon "${TEST_SERVER}")
+        AUTO_ICON=$(status_icon "${TEST_AUTOMATION}")
+
+        # Parse component results
+        get_status() {
+          grep "^$1," ./component_results.csv 2>/dev/null | cut -d',' -f2 || 
echo "N/A"
+        }
+
+        CLI_STATUS=$(get_status "CLI")
+        FDW_STATUS=$(get_status "FDW")
+        SERVER_STATUS=$(get_status "Server")
+        AUTO_STATUS=$(get_status "Automation")
+
+        # Read test summary from JSON
+        if command -v jq >/dev/null 2>&1 && [ -f ./test_summary.json ]; then
+          TOTAL_TESTS=$(jq -r '.overall.total // 0' ./test_summary.json 
2>/dev/null || echo "0")
+          PASSED_TESTS=$(jq -r '.overall.passed // 0' ./test_summary.json 
2>/dev/null || echo "0")
+          FAILED_TESTS=$(jq -r '.overall.failed // 0' ./test_summary.json 
2>/dev/null || echo "0")
+          SKIPPED_TESTS=$(jq -r '.overall.skipped // 0' ./test_summary.json 
2>/dev/null || echo "0")
+        else
+          # Fallback to parsing without jq
+          TOTAL_TESTS=$(grep -o '"total":[[:space:]]*[0-9]*' 
./test_summary.json 2>/dev/null | head -1 | grep -o '[0-9]*' || echo "0")
+          PASSED_TESTS=$(grep -o '"passed":[[:space:]]*[0-9]*' 
./test_summary.json 2>/dev/null | head -1 | grep -o '[0-9]*' || echo "0")
+          FAILED_TESTS=$(grep -o '"failed":[[:space:]]*[0-9]*' 
./test_summary.json 2>/dev/null | head -1 | grep -o '[0-9]*' || echo "0")
+          SKIPPED_TESTS=$(grep -o '"skipped":[[:space:]]*[0-9]*' 
./test_summary.json 2>/dev/null | head -1 | grep -o '[0-9]*' || echo "0")
+        fi
+
+        # Ensure variables are numeric
+        TOTAL_TESTS=${TOTAL_TESTS:-0}
+        PASSED_TESTS=${PASSED_TESTS:-0}
+        FAILED_TESTS=${FAILED_TESTS:-0}
+        SKIPPED_TESTS=${SKIPPED_TESTS:-0}
+
+        # Validate numeric values
+        [[ "$TOTAL_TESTS" =~ ^[0-9]+$ ]] || TOTAL_TESTS=0
+        [[ "$PASSED_TESTS" =~ ^[0-9]+$ ]] || PASSED_TESTS=0
+        [[ "$FAILED_TESTS" =~ ^[0-9]+$ ]] || FAILED_TESTS=0
+        [[ "$SKIPPED_TESTS" =~ ^[0-9]+$ ]] || SKIPPED_TESTS=0
+
+        {
+          echo "## PXF Component Test Results"
+          echo ""
+          echo "| Component | Workflow Status | Test Status |"
+          echo "|----------:|:---------------:|:-----------:|"
+          echo "| Build & Start | ${BUILD_ICON} ${BUILD_START:-skipped} | - |"
+          echo "| CLI | ${CLI_ICON} ${TEST_CLI:-skipped} | ${CLI_STATUS} |"
+          echo "| FDW | ${FDW_ICON} ${TEST_FDW:-skipped} | ${FDW_STATUS} |"
+          echo "| Server | ${SERVER_ICON} ${TEST_SERVER:-skipped} | 
${SERVER_STATUS} |"
+          echo "| Automation | ${AUTO_ICON} ${TEST_AUTOMATION:-skipped} | 
${AUTO_STATUS} |"
+          echo ""
+
+          # Automation detailed results
+          if [ "$TOTAL_TESTS" -gt 0 ] 2>/dev/null; then
+            echo "### Automation Test Summary"
+            echo ""
+            echo "| Metric | Count |"
+            echo "|-------:|------:|"
+            echo "| Total | $TOTAL_TESTS |"
+            echo "| Passed | $PASSED_TESTS |"
+            echo "| Failed | $FAILED_TESTS |"
+            echo "| Skipped | $SKIPPED_TESTS |"
+            echo ""
+            
+            # Test results by group from JSON
+            if [ -f ./test_summary.json ]; then
+              echo "### Test Results by Group"
+              echo ""
+              echo "| Test Group | Status | Passed | Failed | Skipped | Total 
|"
+              echo 
"|-----------:|:------:|-------:|-------:|--------:|------:|"
               
-                if (failedCount > 0) {
-                  core.error(`Automation tests failed: ${failedCount} test(s) 
failed`);
-                  hasErrors = true;
-                }
-                if (skippedCount > 0) {
-                  core.warning(`Automation tests incomplete: ${skippedCount} 
test(s) skipped`);
-                }
-              } catch (error) {
-                console.log('Error processing TestNG results:', error.message);
-                core.error('Error processing automation test results');
-                hasErrors = true;
-              }
-            }
-          }
-          
-          // Write to step summary
-          await summary.write();
-          
-          // Exit with error code if there were errors
-          if (hasErrors) {
-            process.exit(1);
-          }
+              # Extract group data dynamically
+              groups=$(grep -o '"[^"]*":' ./test_summary.json | grep -v 
'"overall":\|"groups":\|"timestamp":\|"total":\|"passed":\|"failed":\|"skipped":'
 | sed 's/[":]*//g' | sort -u)
+              for group in $groups; do
+                if grep -q "\"$group\":" ./test_summary.json; then
+                  group_section=$(sed -n "/\"$group\":/,/}/p" 
./test_summary.json)
+                  g_total=$(echo "$group_section" | grep -o 
'"total":[[:space:]]*[0-9]*' | grep -o '[0-9]*' || echo "0")
+                  g_passed=$(echo "$group_section" | grep -o 
'"passed":[[:space:]]*[0-9]*' | grep -o '[0-9]*' || echo "0")
+                  g_failed=$(echo "$group_section" | grep -o 
'"failed":[[:space:]]*[0-9]*' | grep -o '[0-9]*' || echo "0")
+                  g_skipped=$(echo "$group_section" | grep -o 
'"skipped":[[:space:]]*[0-9]*' | grep -o '[0-9]*' || echo "0")
+                  
+                  [ "$g_total" -eq 0 ] && continue
+                  
+                  if [ "$g_failed" -gt 0 ]; then
+                    status_icon="โŒ FAIL"
+                  else
+                    status_icon="โœ… PASS"
+                  fi
+                  
+                  echo "| ${group} | ${status_icon} | ${g_passed} | 
${g_failed} | ${g_skipped} | ${g_total} |"
+                fi
+              done
+              echo ""
+            fi
+          fi
+
+          # Count failures
+          failed_count=$(grep -c ",FAIL," ./component_results.csv 2>/dev/null 
|| echo 0)
+          passed_count=$(grep -c ",PASS," ./component_results.csv 2>/dev/null 
|| echo 0)
+          total_count=$((failed_count + passed_count))
 
+          if [ "$failed_count" -gt 0 ] 2>/dev/null || [ "$FAILED_TESTS" -gt 0 
] 2>/dev/null; then
+            echo "### โš ๏ธ Summary"
+            [ "$failed_count" -gt 0 ] 2>/dev/null && echo "- Components: 
$failed_count of $total_count failed"
+            [ "$FAILED_TESTS" -gt 0 ] 2>/dev/null && echo "- Automation: 
$FAILED_TESTS of $TOTAL_TESTS test cases failed"
+          elif [ "$total_count" -gt 0 ] 2>/dev/null; then
+            echo "### โœ… Summary: All tests passed"
+            [ "$TOTAL_TESTS" -gt 0 ] 2>/dev/null && echo "- Automation: 
$PASSED_TESTS of $TOTAL_TESTS test cases passed"
+          else
+            echo "### โ„น๏ธ Summary: No test results available"
+          fi
+          echo ""
+          echo "### Artifacts"
+          echo "- Uploaded artifact bundle: 
'automation-test-results-pxf-cbdb-dev'"
+        } >> "$GITHUB_STEP_SUMMARY"
 
+        fail=0
+        for v in "${BUILD_START}" "${TEST_CLI}" "${TEST_FDW}" "${TEST_SERVER}" 
"${TEST_AUTOMATION}"; do
+          if [ "$v" = "failure" ]; then fail=1; fi
+        done
 
+        # Also fail if automation tests had failures
+        if [ "$FAILED_TESTS" -gt 0 ] 2>/dev/null; then
+          echo "Automation tests had $FAILED_TESTS failures. Marking job as 
failed."
+          fail=1
+        fi
+
+        if [ "$fail" -ne 0 ]; then
+          echo "One or more components failed. Marking job as failed."
+          exit 1
+        fi
diff --git a/automation/Makefile b/automation/Makefile
index 528eca21..4e03d24f 100755
--- a/automation/Makefile
+++ b/automation/Makefile
@@ -8,6 +8,8 @@
 SHELL := bash
 UNAME_S := $(shell uname -s)
 MAVEN_TEST_OPTS+= -B -e
+MAVEN_OPTS ?= -Xmx2g -Xms512m
+export MAVEN_OPTS
 PXF_TMP_LIB := $(HOME)/automation_tmp_lib
 BASE_PATH   ?= /mnt/nfs/var/nfsshare
 USE_FDW     ?= false
@@ -20,7 +22,7 @@ ifneq "$(GROUP)" ""
        MAVEN_TEST_OPTS+= -Dgroups=$(GROUP)
 endif
 
-MAVEN_TEST_OPTS+= -Djava.awt.headless=true -DuseFDW=$(USE_FDW)
+MAVEN_TEST_OPTS+= -Djava.awt.headless=true -DuseFDW=$(USE_FDW) 
-Duser.timezone=UTC
 
 ifneq "$(OFFLINE)" "true"
        MAVEN_TEST_OPTS+= -U
diff --git a/automation/pom.xml b/automation/pom.xml
index 9479751f..3b190572 100644
--- a/automation/pom.xml
+++ b/automation/pom.xml
@@ -59,6 +59,9 @@
                 <version>2.15</version>
                 <configuration>
                     <testFailureIgnore>true</testFailureIgnore>
+                    <argLine>-Xmx2048m -XX:MaxPermSize=512m</argLine>
+                    <forkCount>1</forkCount>
+                    <reuseForks>false</reuseForks>
                 </configuration>
                 <executions>
                     <execution>
diff --git 
a/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
 
b/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
index b5720ece..2d491ff5 100644
--- 
a/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
+++ 
b/automation/sqlrepo/features/gpupgrade/extension2_0/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
@@ -40,11 +40,11 @@ FROM pg_catalog.pg_extension AS e
     INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid)
 WHERE d.deptype = 'e' AND e.extname = 'pxf'
 ORDER BY 1;
-       proname      |            prosrc            |             probin
---------------------+------------------------------+----------------------------------
- pxf_read           | pxfprotocol_import           | $PXF_HOME/gpextable/pxf
- pxf_validate       | pxfprotocol_validate_urls    | $PXF_HOME/gpextable/pxf
- pxf_write          | pxfprotocol_export           | $PXF_HOME/gpextable/pxf
- pxfwritable_export | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf
- pxfwritable_import | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf
+       proname      |            prosrc            |   probin
+--------------------+------------------------------+-------------
+ pxf_read           | pxfprotocol_import           | $libdir/pxf
+ pxf_validate       | pxfprotocol_validate_urls    | $libdir/pxf
+ pxf_write          | pxfprotocol_export           | $libdir/pxf
+ pxfwritable_export | gpdbwritableformatter_export | $libdir/pxf
+ pxfwritable_import | gpdbwritableformatter_import | $libdir/pxf
 (5 rows)
diff --git 
a/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
 
b/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
index 36314ef5..44a614a0 100644
--- 
a/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
+++ 
b/automation/sqlrepo/features/gpupgrade/extension2_1/step_1_before_running_pxf_pre_gpupgrade/expected/query01.ans
@@ -40,13 +40,13 @@ FROM pg_catalog.pg_extension AS e
     INNER JOIN pg_catalog.pg_proc AS p ON (p.oid = d.objid)
 WHERE d.deptype = 'e' AND e.extname = 'pxf'
 ORDER BY 1;
-       proname       |            prosrc            |             probin
----------------------+------------------------------+----------------------------------
- pxf_read            | pxfprotocol_import           | $PXF_HOME/gpextable/pxf
- pxf_validate        | pxfprotocol_validate_urls    | $PXF_HOME/gpextable/pxf
- pxf_write           | pxfprotocol_export           | $PXF_HOME/gpextable/pxf
- pxfdelimited_import | pxfdelimited_import          | $PXF_HOME/gpextable/pxf
- pxfwritable_export  | gpdbwritableformatter_export | $PXF_HOME/gpextable/pxf
- pxfwritable_import  | gpdbwritableformatter_import | $PXF_HOME/gpextable/pxf
+       proname       |            prosrc            |   probin
+---------------------+------------------------------+-------------
+ pxf_read            | pxfprotocol_import           | $libdir/pxf
+ pxf_validate        | pxfprotocol_validate_urls    | $libdir/pxf
+ pxf_write           | pxfprotocol_export           | $libdir/pxf
+ pxfdelimited_import | pxfdelimited_import          | $libdir/pxf
+ pxfwritable_export  | gpdbwritableformatter_export | $libdir/pxf
+ pxfwritable_import  | gpdbwritableformatter_import | $libdir/pxf
 (6 rows)
 
diff --git 
a/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
 
b/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
index 1d24e93b..5d99621f 100644
--- 
a/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
+++ 
b/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
@@ -247,14 +247,14 @@ select * from hcfs_glob_match_string_from_string_set_10 
order by name, num;
 -- m/CONTEXT:.*file.*/
 -- s/, file.*//g
 --
+-- m/CONTEXT:.*External table.*line \d* of pxf.*/
+-- s/, line \d* of pxf.*//g
+--
 -- end_matchsubs
 select * from hcfs_glob_match_string_from_string_set_11 order by name, num;
 ERROR:  PXF server error : Unclosed group near index xxx
-GP_IGNORE:GP_IGNORE:-- start_ignore
-GP_IGNORE:GP_IGNORE:HINT:  Check the PXF logs located in the 'logs-dir' 
directory on host 'mdw' or 'set client_min_messages=LOG' for additional details.
-GP_IGNORE:GP_IGNORE:-- end_ignore
-CONTEXT:  External table hcfs_glob_match_string_from_string_set_11
-
+HINT:  Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' 
directory on host 'localhost' or 'set client_min_messages=LOG' for additional 
details.
+CONTEXT:  External table hcfs_glob_match_string_from_string_set_11, 
pxf://pxf_automation_data?PROFILE=*:text: ""
 -- }\{bc will match }{bc but it will not match }bc
 select * from hcfs_glob_match_string_from_string_set_12 order by name, num;
       name      | num | dub |    longnum    | bool 
diff --git 
a/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans
 
b/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans
index e5b7729b..af75fe8e 100644
--- 
a/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans
+++ 
b/automation/sqlrepo/features/hdfs/writable/json/invalid_encoding/expected/query01.ans
@@ -7,4 +7,4 @@
 -- end_matchsubs
 
 INSERT INTO pxf_invalid_encoding_json_write SELECT * from gpdb_primitive_types;
-ERROR:  pxfwritable_export formatter can only export UTF8 formatted data. 
Define the external table with ENCODING UTF8
+ERROR:  gpdbwritable formatter can only export UTF8 formatted data. Define the 
external table with ENCODING UTF8
diff --git 
a/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans 
b/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans
index 95e84a15..47a6535a 100644
--- a/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans
+++ b/automation/sqlrepo/features/jdbc/session_params/expected/query01.ans
@@ -5,7 +5,7 @@
 SELECT * FROM pxf_jdbc_read_view_no_params WHERE name='client_min_messages' OR 
name='default_statistics_target' ORDER BY name;
            name            | setting
 ---------------------------+---------
- client_min_messages       | notice
+ client_min_messages       | error
  default_statistics_target | 100
 (2 rows)
 
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java
index c234d9cb..c4477da5 100644
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/CloudAccessTest.java
@@ -8,6 +8,7 @@ import org.greenplum.pxf.automation.features.BaseFeature;
 import org.greenplum.pxf.automation.structures.tables.basic.Table;
 import org.greenplum.pxf.automation.structures.tables.utils.TableFactory;
 import org.greenplum.pxf.automation.utils.system.ProtocolUtils;
+import org.greenplum.pxf.automation.utils.system.ProtocolEnum;
 import org.testng.annotations.Test;
 
 import java.net.URI;
@@ -42,6 +43,9 @@ public class CloudAccessTest extends BaseFeature {
      */
     @Override
     public void beforeClass() throws Exception {
+        if (ProtocolUtils.getProtocol() == ProtocolEnum.HDFS) {
+            return;
+        }
         // Initialize server objects
         String random = UUID.randomUUID().toString();
         s3PathRead  = 
String.format("gpdb-ud-scratch/tmp/pxf_automation_data_read/%s/" , random);
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/S3SelectTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/S3SelectTest.java
index 78b2a944..ca2590ed 100644
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/S3SelectTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/cloud/S3SelectTest.java
@@ -5,6 +5,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.greenplum.pxf.automation.components.hdfs.Hdfs;
 import org.greenplum.pxf.automation.features.BaseFeature;
 import 
org.greenplum.pxf.automation.structures.tables.pxf.ReadableExternalTable;
+import org.greenplum.pxf.automation.utils.system.ProtocolEnum;
 import org.greenplum.pxf.automation.utils.system.ProtocolUtils;
 import org.testng.annotations.Test;
 
@@ -55,6 +56,9 @@ public class S3SelectTest extends BaseFeature {
      */
     @Override
     public void beforeClass() throws Exception {
+        if (ProtocolUtils.getProtocol() == ProtocolEnum.HDFS) {
+            return;
+        }
         // Initialize server objects
         s3Path = 
String.format("gpdb-ud-scratch/tmp/pxf_automation_data/%s/s3select/", 
UUID.randomUUID().toString());
         Configuration s3Configuration = new Configuration();
@@ -73,7 +77,7 @@ public class S3SelectTest extends BaseFeature {
         }
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testPlainCsvWithHeaders() throws Exception {
         String[] userParameters = {"FILE_HEADER=IGNORE", "S3_SELECT=ON"};
         runTestScenario("csv", "s3", "csv", s3Path,
@@ -81,7 +85,7 @@ public class S3SelectTest extends BaseFeature {
                 "|", userParameters);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testPlainCsvWithHeadersUsingHeaderInfo() throws Exception {
         String[] userParameters = {"FILE_HEADER=USE", "S3_SELECT=ON"};
         runTestScenario("csv_use_headers", "s3", "csv", s3Path,
@@ -89,7 +93,7 @@ public class S3SelectTest extends BaseFeature {
                 "|", userParameters);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testCsvWithHeadersUsingHeaderInfoWithWrongColumnNames() throws 
Exception {
         String[] userParameters = {"FILE_HEADER=USE", "S3_SELECT=ON"};
         runTestScenario("errors/", "csv_use_headers_with_wrong_col_names", 
"s3", "csv", s3Path,
@@ -97,7 +101,7 @@ public class S3SelectTest extends BaseFeature {
                 "|", userParameters, PXF_S3_SELECT_INVALID_COLS);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testPlainCsvWithNoHeaders() throws Exception {
         String[] userParameters = {"FILE_HEADER=NONE", "S3_SELECT=ON"};
         runTestScenario("csv_noheaders", "s3", "csv", s3Path,
@@ -105,7 +109,7 @@ public class S3SelectTest extends BaseFeature {
                 "|", userParameters);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testGzipCsvWithHeadersUsingHeaderInfo() throws Exception {
         String[] userParameters = {"FILE_HEADER=USE", "S3_SELECT=ON", 
"COMPRESSION_CODEC=gzip"};
         runTestScenario("gzip_csv_use_headers", "s3", "csv", s3Path,
@@ -113,7 +117,7 @@ public class S3SelectTest extends BaseFeature {
                 "|", userParameters);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testBzip2CsvWithHeadersUsingHeaderInfo() throws Exception {
         String[] userParameters = {"FILE_HEADER=USE", "S3_SELECT=ON", 
"COMPRESSION_CODEC=bzip2"};
         runTestScenario("bzip2_csv_use_headers", "s3", "csv", s3Path,
@@ -121,7 +125,7 @@ public class S3SelectTest extends BaseFeature {
                 "|", userParameters);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testParquet() throws Exception {
         String[] userParameters = {"S3_SELECT=ON"};
         runTestScenario("parquet", "s3", "parquet", s3Path,
@@ -129,7 +133,7 @@ public class S3SelectTest extends BaseFeature {
                 null, userParameters);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testParquetWildcardLocation() throws Exception {
         String[] userParameters = {"S3_SELECT=ON"};
         runTestScenario("", "parquet", "s3", "parquet", s3Path,
@@ -137,7 +141,7 @@ public class S3SelectTest extends BaseFeature {
                 null, userParameters, LINEITEM_SCHEMA);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testSnappyParquet() throws Exception {
         String[] userParameters = {"S3_SELECT=ON"};
         runTestScenario("parquet_snappy", "s3", "parquet", s3Path,
@@ -145,7 +149,7 @@ public class S3SelectTest extends BaseFeature {
                 null, userParameters);
     }
 
-    @Test(groups = {"gpdb", "s3"})
+    @Test(groups = {"s3"})
     public void testGzipParquet() throws Exception {
         String[] userParameters = {"S3_SELECT=ON"};
         runTestScenario("parquet_gzip", "s3", "parquet", s3Path,
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java
index 374df747..22417443 100644
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/general/FailOverTest.java
@@ -30,7 +30,11 @@ public class FailOverTest extends BaseFeature {
         super.afterClass();
         // We need to restore the service after it has been stopped
         if (cluster != null) {
-            cluster.start(PhdCluster.EnumClusterServices.pxf);
+            try {
+                cluster.start(PhdCluster.EnumClusterServices.pxf);
+            } catch (Exception e) {
+                // Ignore if service is already running
+            }
         }
     }
 
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java
index 14770b3c..0ed666cf 100644
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/gpupgrade/GpupgradeTest.java
@@ -20,18 +20,18 @@ public class GpupgradeTest extends BaseFunctionality {
     private ReadableExternalTable externalTable;
 
     @Override
-    protected void beforeMethod() throws Exception {
-        super.beforeMethod();
+    protected void beforeClass() throws Exception {
+        super.beforeClass();
         String location = prepareData();
         createReadablePxfTable("default", location);
     }
 
     @Override
-    protected void afterMethod() throws Exception {
+    protected void afterClass() throws Exception {
         if (gpdb != null) {
             gpdb.dropTable(externalTable, true);
         }
-        super.afterMethod();
+        super.afterClass();
     }
 
     @Test(groups = {"features", "gpdb"})
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveBaseTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveBaseTest.java
index 7c21322d..b537e8f4 100755
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveBaseTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveBaseTest.java
@@ -371,11 +371,25 @@ public class HiveBaseTest extends BaseFeature {
 
     void loadDataIntoHive(Hdfs hdfs, Hive hive, String fileName, HiveTable 
tableName) throws Exception {
 
+        String localPath = localDataResourcesFolder + "/hive/" + fileName;
+        String hdfsPath = hdfs.getWorkingDirectory() + "/" + fileName;
+
+        // Verify local file exists
+        java.io.File localFile = new java.io.File(localPath);
+        if (!localFile.exists()) {
+            throw new RuntimeException("Local file does not exist: " + 
localFile.getAbsolutePath());
+        }
+
         // copy data to hdfs
-        hdfs.copyFromLocal(localDataResourcesFolder + "/hive/" + fileName,
-                hdfs.getWorkingDirectory() + "/" + fileName);
+        hdfs.copyFromLocal(localPath, hdfsPath);
+
+        // Verify file was copied to HDFS
+        if (!hdfs.doesFileExist(hdfsPath)) {
+            throw new RuntimeException("File was not copied to HDFS: " + 
hdfsPath);
+        }
+        
         // load to hive table
-        hive.loadData(tableName, hdfs.getWorkingDirectory() + "/" + fileName, 
false);
+        hive.loadData(tableName, hdfsPath, false);
     }
 
     String[] hiveTestFilter(String filterString) {
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java
index 37b9d32e..a546ce01 100644
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/jdbc/JdbcHiveTest.java
@@ -166,7 +166,7 @@ public class JdbcHiveTest extends BaseFeature {
     }
 
     protected void createTables(Hive hive, String serverName, String 
gpdbTypesTableName, String gpdbQueryTableName) throws Exception {
-        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + 
":10000/default";
+        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + 
":10000/default;auth=noSasl";
         String user = null;
 
         // On kerberized cluster, enabled then we need the 
hive/hiveserver2_hostname principal in the connection string.
@@ -219,7 +219,7 @@ public class JdbcHiveTest extends BaseFeature {
             hiveReadable = TableFactory.getPxfJdbcReadableTable(
                     hiveReadableName, GPDB_WRITE_TYPES_TABLE_FIELDS, 
targetHiveTable.getFullName(), serverName);
         } else {
-            String jdbcUrl = String.format("%s%s:10000/default", 
HIVE_JDBC_URL_PREFIX, hive.getHost());
+            String jdbcUrl = String.format("%s%s:10000/default;auth=noSasl", 
HIVE_JDBC_URL_PREFIX, hive.getHost());
             // create GPDB external table for writing data from GPDB to Hive 
with JDBC profile
             hiveWritable = TableFactory.getPxfJdbcWritableTable(
                     hiveWritableName, GPDB_WRITE_TYPES_TABLE_FIELDS, 
targetHiveTable.getFullName(),
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java
index ff96415b..3c142db8 100755
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/multiserver/MultiServerTest.java
@@ -9,6 +9,7 @@ import 
org.greenplum.pxf.automation.structures.tables.basic.Table;
 import org.greenplum.pxf.automation.structures.tables.pxf.ExternalTable;
 import org.greenplum.pxf.automation.structures.tables.utils.TableFactory;
 import org.greenplum.pxf.automation.utils.system.ProtocolUtils;
+import org.greenplum.pxf.automation.utils.system.ProtocolEnum;
 import org.testng.annotations.Test;
 
 import java.net.URI;
@@ -47,6 +48,9 @@ public class MultiServerTest extends BaseFeature {
      */
     @Override
     public void beforeClass() throws Exception {
+        if (ProtocolUtils.getProtocol() == ProtocolEnum.HDFS) {
+            return;
+        }
         // Initialize an additional HDFS system object (optional system object)
         hdfs2 = (Hdfs) systemManager.
                 getSystemObject("/sut", "hdfs2", -1, null, false, null, 
SutFactory.getInstance().getSutInstance());
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java
index 15a1a23b..560fa063 100644
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/orc/OrcWriteTest.java
@@ -207,7 +207,7 @@ public class OrcWriteTest extends BaseFeature {
         hive.runQuery(ctasHiveQuery);
 
         // use the Hive JDBC profile to avoid using the PXF ORC reader 
implementation
-        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + 
":10000/default";
+        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + 
":10000/default;auth=noSasl";
         ExternalTable exHiveJdbcTable = TableFactory.getPxfJdbcReadableTable(
                 gpdbTableNamePrefix + "_readable", 
ORC_PRIMITIVE_TABLE_COLUMNS_READ_FROM_HIVE,
                 hiveTable.getName() + "_ctas", HIVE_JDBC_DRIVER_CLASS, 
jdbcUrl, null);
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java
index 8bd235b6..0ec32b21 100644
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/parquet/ParquetWriteTest.java
@@ -369,7 +369,7 @@ public class ParquetWriteTest extends BaseWritableFeature {
         }
 
         // use the Hive JDBC profile to avoid using the PXF Parquet reader 
implementation
-        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + 
":10000/default";
+        String jdbcUrl = HIVE_JDBC_URL_PREFIX + hive.getHost() + 
":10000/default;auth=noSasl";
 
         ExternalTable exHiveJdbcTable = TableFactory.getPxfJdbcReadableTable(
                 readTableName, 
PARQUET_PRIMITIVE_ARRAYS_TABLE_COLUMNS_READ_FROM_HIVE,
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableSequenceTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableSequenceTest.java
index 314ac355..cdc8f020 100755
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableSequenceTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/writable/HdfsWritableSequenceTest.java
@@ -184,38 +184,38 @@ public class HdfsWritableSequenceTest extends 
BaseWritableFeature {
     @Test(groups = {"features", "gpdb", "hcfs", "security"})
     public void negativeCharType() throws Exception {
 
-        String[] fields = {"a1 INTEGER", "c1 CHAR"};
+        String[] fields = {"a1 INTEGER", "c1 JSON"};
         String hdfsDir = hdfsPath + "/negative_char_type";
         writableExTable = 
prepareWritableSequenceTable("pxf_negative_char_type_w",
                 fields, hdfsDir, schemaPackage + customSchemaWithCharFileName);
 
         Table dataTable = new Table("data", null);
-        dataTable.addRow(new String[]{"100", "a"});
-        dataTable.addRow(new String[]{"1000", "b"});
+        dataTable.addRow(new String[]{"100", "'{\"key\":\"value\"}'"});
+        dataTable.addRow(new String[]{"1000", "'{\"key\":\"value\"}'"});
 
         try {
             gpdb.insertData(dataTable, writableExTable);
             Assert.fail("Insert data should fail because of unsupported type");
         } catch (PSQLException e) {
-            ExceptionUtils.validate(null, e, new PSQLException("ERROR.*Type 
char is not supported " +
+            ExceptionUtils.validate(null, e, new PSQLException("ERROR.*Type 
json is not supported " +
                     "by GPDBWritable.*?", null), true);
         }
     }
 
     /**
-     * Test COMPRESSION_TYPE = NONE -- negative
+     * Test COMPRESSION_TYPE = INVALID -- negative
      *
      * @throws Exception if test fails to run
      */
     @Test(groups = {"features", "gpdb", "hcfs", "security"})
     public void negativeCompressionTypeNone() throws Exception {
 
-        String[] fields = {"a1 INTEGER", "c1 CHAR"};
+        String[] fields = {"a1 INTEGER", "c1 TEXT"};
         String hdfsDir = hdfsPath + "/negative_compression_type_none";
 
         writableExTable = 
prepareWritableSequenceTable("pxf_negative_compression_type_none",
                 fields, hdfsDir, schemaPackage + customSchemaWithCharFileName,
-                new String[]{"COMPRESSION_TYPE=NONE"}, null);
+                new String[]{"COMPRESSION_TYPE=XZ"}, null);
 
         Table dataTable = new Table("data", null);
         dataTable.addRow(new String[]{"100", "a"});
@@ -226,8 +226,7 @@ public class HdfsWritableSequenceTest extends 
BaseWritableFeature {
             Assert.fail("Insert data should fail because of illegal 
compression type");
         } catch (PSQLException e) {
             ExceptionUtils.validate(null, e,
-                    new PSQLException("ERROR.*Illegal compression type 
'NONE'\\. For disabling compression " +
-                            "remove COMPRESSION_CODEC parameter\\..*?", null), 
true);
+                    new PSQLException("ERROR.*Illegal compression type 
'XZ'.*?", null), true);
         }
     }
 
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberrry.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberrry.sh
index 3a08dddc..f0caad50 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberrry.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberrry.sh
@@ -75,7 +75,8 @@ sudo apt install -y bison \
   python3-dev \
   python3-pip \
   python3-setuptools \
-  rsync
+  rsync \
+  libsnappy-dev
 
 # Continue as gpadmin user
 
diff --git 
a/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberry_deb.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberry_deb.sh
new file mode 100755
index 00000000..eb3b8d87
--- /dev/null
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_cloudberry_deb.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+set -euo pipefail
+
+# Cloudberry DEB Package Build Script for Ubuntu 22.04
+CLOUDBERRY_VERSION="${CLOUDBERRY_VERSION:-99.0.0}"
+CLOUDBERRY_BUILD="${CLOUDBERRY_BUILD:-1}"
+INSTALL_PREFIX="${INSTALL_PREFIX:-/usr/local/cloudberry-db}"
+WORKSPACE="${WORKSPACE:-$HOME/workspace}"
+CLOUDBERRY_SRC="${WORKSPACE}/cloudberry"
+
+echo "=== Cloudberry DEB Package Build ==="
+echo "Version: ${CLOUDBERRY_VERSION}"
+echo "Build: ${CLOUDBERRY_BUILD}"
+echo "Install Prefix: ${INSTALL_PREFIX}"
+echo "Source: ${CLOUDBERRY_SRC}"
+
+# Clean previous installation
+rm -rf "${INSTALL_PREFIX}"
+mkdir -p "${INSTALL_PREFIX}"
+
+# Configure Cloudberry
+cd "${CLOUDBERRY_SRC}"
+./configure --prefix="${INSTALL_PREFIX}" \
+            --disable-external-fts \
+            --enable-gpcloud \
+            --enable-ic-proxy \
+            --enable-mapreduce \
+            --enable-orafce \
+            --enable-orca \
+            --disable-pax \
+            --enable-pxf \
+            --enable-tap-tests \
+            --with-gssapi \
+            --with-ldap \
+            --with-libxml \
+            --with-lz4 \
+            --with-pam \
+            --with-perl \
+            --with-pgport=5432 \
+            --with-python \
+            --with-pythonsrc-ext \
+            --with-ssl=openssl \
+            --with-uuid=e2fs \
+            --with-includes=/usr/include/xercesc
+
+# Build and install
+make -j$(nproc)
+make -j$(nproc) -C contrib
+make install
+make install -C contrib
+
+# Copy LICENSE
+cp LICENSE "${INSTALL_PREFIX}/"
+
+# Create deb package structure
+DEB_BUILD_DIR="${WORKSPACE}/cloudberry-deb"
+DEB_PKG_DIR="${DEB_BUILD_DIR}/apache-cloudberry-db_${CLOUDBERRY_VERSION}-${CLOUDBERRY_BUILD}_amd64"
+mkdir -p "${DEB_PKG_DIR}/DEBIAN"
+mkdir -p "${DEB_PKG_DIR}${INSTALL_PREFIX}"
+
+# Copy installed files
+cp -a "${INSTALL_PREFIX}"/* "${DEB_PKG_DIR}${INSTALL_PREFIX}/"
+
+# Create control file
+cat > "${DEB_PKG_DIR}/DEBIAN/control" << EOF
+Package: apache-cloudberry-db
+Version: ${CLOUDBERRY_VERSION}-${CLOUDBERRY_BUILD}
+Section: database
+Priority: optional
+Architecture: amd64
+Maintainer: Apache Cloudberry <[email protected]>
+Description: Apache Cloudberry Database
+ Apache Cloudberry is a massively parallel processing (MPP) database
+ built on PostgreSQL for analytics and data warehousing.
+Depends: libc6, libssl3, libreadline8, libxml2, libxerces-c3.2, liblz4-1, 
libzstd1, libapr1, libcurl4, libevent-2.1-7, libkrb5-3, libldap-2.5-0, 
libpam0g, libuv1, libyaml-0-2
+EOF
+
+# Create postinst script
+cat > "${DEB_PKG_DIR}/DEBIAN/postinst" << 'EOF'
+#!/bin/bash
+set -e
+if ! id -u gpadmin >/dev/null 2>&1; then
+    useradd -m -s /bin/bash gpadmin
+fi
+chown -R gpadmin:gpadmin /usr/local/cloudberry-db
+echo "Apache Cloudberry Database installed successfully"
+EOF
+
+chmod 755 "${DEB_PKG_DIR}/DEBIAN/postinst"
+
+# Build deb package
+cd "${DEB_BUILD_DIR}"
+dpkg-deb --build "$(basename ${DEB_PKG_DIR})"
+
+DEB_FILE="${DEB_BUILD_DIR}/apache-cloudberry-db_${CLOUDBERRY_VERSION}-${CLOUDBERRY_BUILD}_amd64.deb"
+echo "=== DEB Package Created ==="
+ls -lh "${DEB_FILE}"
+dpkg-deb -I "${DEB_FILE}"
+echo "=== Build Complete ==="
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh
index b7d2028c..a644c1e4 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh
@@ -13,6 +13,10 @@ sudo apt install -y openjdk-11-jdk maven
 
 cd /home/gpadmin/workspace/cloudberry-pxf
 
+# Ensure gpadmin owns the source directory
+sudo chown -R gpadmin:gpadmin /home/gpadmin/workspace/cloudberry-pxf
+sudo chown -R gpadmin:gpadmin /usr/local/cloudberry-db
+
 # mirror
 # If the download fails, you can uncomment the line to switch to another 
mirror address.
 # Configure Gradle to use Aliyun mirror
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
index b180c10e..3c98b54c 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
@@ -82,8 +82,120 @@ EOF
   fi
 }
 
+install_cloudberry_from_deb() {
+  log "installing Cloudberry from .deb package"
+  local deb_file=$(find /tmp -name "apache-cloudberry-db*.deb" 2>/dev/null | 
head -1)
+  if [ -z "$deb_file" ]; then
+    die "No .deb package found in /tmp"
+  fi
+
+  # Install sudo & git
+  sudo apt update && sudo apt install -y sudo git
+
+  # Required configuration
+  ## Add Cloudberry environment setup to .bashrc
+  echo -e '\n# Add Cloudberry entries
+  if [ -f /usr/local/cloudberry-db/cloudberry-env.sh ]; then
+    source /usr/local/cloudberry-db/cloudberry-env.sh
+  fi
+  ## US English with UTF-8 character encoding
+  export LANG=en_US.UTF-8
+  ' >> /home/gpadmin/.bashrc
+  ## Set up SSH for passwordless access
+  mkdir -p /home/gpadmin/.ssh
+  if [ ! -f /home/gpadmin/.ssh/id_rsa ]; then
+    ssh-keygen -t rsa -b 2048 -C 'apache-cloudberry-dev' -f 
/home/gpadmin/.ssh/id_rsa -N ""
+  fi
+  cat /home/gpadmin/.ssh/id_rsa.pub >> /home/gpadmin/.ssh/authorized_keys
+  ## Set proper SSH directory permissions
+  chmod 700 /home/gpadmin/.ssh
+  chmod 600 /home/gpadmin/.ssh/authorized_keys
+  chmod 644 /home/gpadmin/.ssh/id_rsa.pub
+
+# Configure system settings
+sudo tee /etc/security/limits.d/90-db-limits.conf << 'EOF'
+## Core dump file size limits for gpadmin
+gpadmin soft core unlimited
+gpadmin hard core unlimited
+## Open file limits for gpadmin
+gpadmin soft nofile 524288
+gpadmin hard nofile 524288
+## Process limits for gpadmin
+gpadmin soft nproc 131072
+gpadmin hard nproc 131072
+EOF
+
+  # Verify resource limits
+  ulimit -a
+
+  # Install basic system packages
+  sudo apt update
+  sudo apt install -y bison \
+    bzip2 \
+    cmake \
+    curl \
+    flex \
+    gcc \
+    g++ \
+    iproute2 \
+    iputils-ping \
+    language-pack-en \
+    locales \
+    libapr1-dev \
+    libbz2-dev \
+    libcurl4-gnutls-dev \
+    libevent-dev \
+    libkrb5-dev \
+    libipc-run-perl \
+    libldap2-dev \
+    libpam0g-dev \
+    libprotobuf-dev \
+    libreadline-dev \
+    libssl-dev \
+    libuv1-dev \
+    liblz4-dev \
+    libxerces-c-dev \
+    libxml2-dev \
+    libyaml-dev \
+    libzstd-dev \
+    libperl-dev \
+    make \
+    pkg-config \
+    protobuf-compiler \
+    python3-dev \
+    python3-pip \
+    python3-setuptools \
+    rsync \
+    libsnappy-dev
+
+
+  # Continue as gpadmin user
+
+
+  # Prepare the build environment for Apache Cloudberry
+  sudo rm -rf /usr/local/cloudberry-db
+  sudo chmod a+w /usr/local
+  mkdir -p /usr/local/cloudberry-db
+  sudo chown -R gpadmin:gpadmin /usr/local/cloudberry-db
+
+  sudo dpkg -i "$deb_file" || sudo apt-get install -f -y
+  log "Cloudberry installed from $deb_file"
+  
+  # Initialize and start Cloudberry cluster
+  source /usr/local/cloudberry-db/cloudberry-env.sh
+  make create-demo-cluster -C ~/workspace/cloudberry || {
+    log "create-demo-cluster failed, trying manual setup"
+    cd ~/workspace/cloudberry
+    ./configure --prefix=/usr/local/cloudberry-db --enable-debug --with-perl 
--with-python --with-libxml --enable-depend
+    make create-demo-cluster
+  }
+  source ~/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh
+  psql -P pager=off template1 -c 'SELECT * from gp_segment_configuration'
+  psql template1 -c 'SELECT version()'
+}
+
 build_cloudberry() {
-  log "build Cloudberry"
+  log "building Cloudberry from source"
   log "cleanup stale gpdemo data and PG locks"
   rm -rf /home/gpadmin/workspace/cloudberry/gpAux/gpdemo/datadirs
   rm -f /tmp/.s.PGSQL.700*
@@ -91,6 +203,19 @@ build_cloudberry() {
   "${PXF_SCRIPTS}/build_cloudberrry.sh"
 }
 
+setup_cloudberry() {
+  # Auto-detect: if deb exists, install it; otherwise build from source
+  if [ -f /tmp/apache-cloudberry-db*.deb ]; then
+    log "detected .deb package, using fast install"
+    install_cloudberry_from_deb
+  elif [ "${CLOUDBERRY_USE_DEB:-}" = "true" ]; then
+    die "CLOUDBERRY_USE_DEB=true but no .deb found in /tmp"
+  else
+    log "no .deb found, building from source (local dev mode)"
+    build_cloudberry
+  fi
+}
+
 build_pxf() {
   log "build PXF"
   "${PXF_SCRIPTS}/build_pxf.sh"
@@ -123,7 +248,7 @@ XML
     sed -i 's#</configuration>#  <property>\n    
<name>pxf.service.user.name</name>\n    <value>foobar</value>\n  </property>\n  
<property>\n    <name>pxf.service.user.impersonation</name>\n    
<value>false</value>\n  </property>\n</configuration>#' 
"$PXF_BASE/servers/default-no-impersonation/pxf-site.xml"
   fi
 
-  # Configure pxf-profiles.xml for Parquet support
+  # Configure pxf-profiles.xml for Parquet and test profiles
   cat > "$PXF_BASE/conf/pxf-profiles.xml" <<'EOF'
 <?xml version="1.0" encoding="UTF-8"?>
 <profiles>
@@ -136,6 +261,15 @@ XML
             <resolver>org.greenplum.pxf.plugins.hdfs.ParquetResolver</resolver>
         </plugins>
     </profile>
+    <profile>
+        <name>test:text</name>
+        <description>Test profile for text files</description>
+        <plugins>
+            
<fragmenter>org.greenplum.pxf.plugins.hdfs.HdfsDataFragmenter</fragmenter>
+            
<accessor>org.greenplum.pxf.plugins.hdfs.LineBreakAccessor</accessor>
+            
<resolver>org.greenplum.pxf.plugins.hdfs.StringPassResolver</resolver>
+        </plugins>
+    </profile>
 </profiles>
 EOF
 
@@ -151,6 +285,15 @@ EOF
             <resolver>org.greenplum.pxf.plugins.hdfs.ParquetResolver</resolver>
         </plugins>
     </profile>
+    <profile>
+        <name>test:text</name>
+        <description>Test profile for text files</description>
+        <plugins>
+            
<fragmenter>org.greenplum.pxf.plugins.hdfs.HdfsDataFragmenter</fragmenter>
+            
<accessor>org.greenplum.pxf.plugins.hdfs.LineBreakAccessor</accessor>
+            
<resolver>org.greenplum.pxf.plugins.hdfs.StringPassResolver</resolver>
+        </plugins>
+    </profile>
 </profiles>
 EOF
 
@@ -289,6 +432,33 @@ start_hive_services() {
   # start HS2 with NOSASL
   HIVE_OPTS="--hiveconf hive.server2.authentication=NOSASL --hiveconf 
hive.metastore.uris=thrift://localhost:9083 --hiveconf 
javax.jdo.option.ConnectionURL=jdbc:derby:;databaseName=${GPHD_ROOT}/storage/hive/metastore_db;create=true"
 \
     "${GPHD_ROOT}/bin/hive-service.sh" hiveserver2 start
+
+  # wait for HiveServer2 to be ready
+  log "waiting for HiveServer2 to start on port 10000..."
+  for i in {1..60}; do
+    if ss -ln | grep -q ":10000 " || lsof -i :10000 >/dev/null 2>&1; then
+      log "HiveServer2 port is listening, testing connection..."
+      if echo "SHOW DATABASES;" | beeline -u 
"jdbc:hive2://localhost:10000/default" --silent=true >/dev/null 2>&1; then
+        log "HiveServer2 is ready and accessible"
+        break
+      else
+        log "HiveServer2 port is up but not ready for connections, waiting... 
(attempt $i/60)"
+      fi
+    else
+      log "HiveServer2 port 10000 not yet listening... (attempt $i/60)"
+    fi
+    if [ $i -eq 60 ]; then
+      log "ERROR: HiveServer2 failed to start properly after 60 seconds"
+      log "Checking HiveServer2 process:"
+      pgrep -f HiveServer2 || log "No HiveServer2 process found"
+      log "Checking port 10000:"
+      ss -ln | grep ":10000" || lsof -i :10000 || log "Port 10000 not 
listening"
+      log "HiveServer2 logs:"
+      tail -20 "${GPHD_ROOT}/storage/logs/hive-gpadmin-hiveserver2-mdw.out" 
2>/dev/null || log "No HiveServer2 log found"
+      exit 1
+    fi
+    sleep 1
+  done
 }
 
 run_tests() {
@@ -309,7 +479,7 @@ main() {
   detect_java_paths
   setup_locale_and_packages
   setup_ssh
-  build_cloudberry
+  setup_cloudberry
   relax_pg_hba
   build_pxf
   configure_pxf
@@ -318,7 +488,6 @@ main() {
   health_check
   #run_tests
   log "entrypoint finished; keeping container alive"
-  tail -f /dev/null
 }
 
 main "$@"
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/pxf-test.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/pxf-test.sh
new file mode 100755
index 00000000..ede1896b
--- /dev/null
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/pxf-test.sh
@@ -0,0 +1,196 @@
+#!/bin/bash
+set -euo pipefail
+
+RUN_TESTS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+REPO_ROOT="$(cd "${RUN_TESTS_DIR}/../../../../.." && pwd)"
+
+# Load env
+source "${RUN_TESTS_DIR}/pxf-env.sh"
+
+# Test results tracking
+declare -A TEST_RESULTS
+RESULTS_FILE="${REPO_ROOT}/automation/test_artifacts/component_results.csv"
+
+# Ensure artifacts directory
+mkdir -p "${REPO_ROOT}/automation/test_artifacts"
+
+# Initialize results file
+echo "Component,Status,ExitCode" > "$RESULTS_FILE"
+
+record_result() {
+    local component=$1
+    local status=$2
+    local exit_code=$3
+    echo "$component,$status,$exit_code" >> "$RESULTS_FILE"
+    TEST_RESULTS[$component]=$status
+}
+
+test_cli() {
+    echo "=== Testing PXF CLI ==="
+    cd "${REPO_ROOT}/cli"
+    if make test; then
+        record_result "CLI" "PASS" 0
+        return 0
+    else
+        record_result "CLI" "FAIL" $?
+        return 1
+    fi
+}
+
+test_fdw() {
+    echo "=== Testing PXF FDW ==="
+    [ -f "/usr/local/cloudberry-db/cloudberry-env.sh" ] && source 
/usr/local/cloudberry-db/cloudberry-env.sh
+    cd "${REPO_ROOT}/fdw"
+    if make test; then
+        record_result "FDW" "PASS" 0
+        return 0
+    else
+        record_result "FDW" "FAIL" $?
+        return 1
+    fi
+}
+
+test_external_table() {
+    echo "=== Testing PXF External Table ==="
+    [ -f "/usr/local/cloudberry-db/cloudberry-env.sh" ] && source 
/usr/local/cloudberry-db/cloudberry-env.sh
+    cd "${REPO_ROOT}/external-table"
+    if make installcheck; then
+        record_result "External-Table" "PASS" 0
+        return 0
+    else
+        record_result "External-Table" "FAIL" $?
+        return 1
+    fi
+}
+
+test_server() {
+    echo "=== Testing PXF Server ==="
+    [ -f "/usr/local/cloudberry-db/cloudberry-env.sh" ] && source 
/usr/local/cloudberry-db/cloudberry-env.sh
+    cd "${REPO_ROOT}/server"
+    if ./gradlew test; then
+        record_result "Server" "PASS" 0
+        return 0
+    else
+        record_result "Server" "FAIL" $?
+        return 1
+    fi
+}
+
+test_automation() {
+    echo "=== Testing PXF Automation ==="
+    if "${RUN_TESTS_DIR}/run_tests.sh"; then
+        record_result "Automation" "PASS" 0
+        return 0
+    else
+        record_result "Automation" "FAIL" $?
+        return 1
+    fi
+}
+
+display_results() {
+    echo
+    echo "=========================================="
+    echo "PXF Component Test Results"
+    echo "=========================================="
+    column -t -s',' "$RESULTS_FILE"
+    echo "=========================================="
+    echo
+    
+    # Count results
+    local total=0
+    local passed=0
+    local failed=0
+    
+    for component in "${!TEST_RESULTS[@]}"; do
+        ((total++))
+        if [ "${TEST_RESULTS[$component]}" = "PASS" ]; then
+            ((passed++))
+        else
+            ((failed++))
+        fi
+    done
+    
+    echo "Summary: $total components, $passed passed, $failed failed"
+    echo
+    
+    return $failed
+}
+
+usage() {
+    cat <<EOF
+Usage: $0 [COMPONENT...]
+
+Run PXF component tests. If no component specified, runs all.
+
+Components:
+  cli              Test PXF CLI
+  fdw              Test PXF FDW
+  external-table   Test PXF External Table
+  server           Test PXF Server
+  automation       Test PXF Automation (smoke tests)
+  all              Run all tests (default)
+
+Examples:
+  $0 cli fdw           # Run CLI and FDW tests only
+  $0 server            # Run server tests only
+  $0                   # Run all tests
+EOF
+}
+
+main() {
+    local components=("$@")
+    local exit_code=0
+
+    # If no args, run all
+    if [ ${#components[@]} -eq 0 ]; then
+        components=(cli fdw external-table server automation)
+    fi
+
+    # Handle 'all' keyword
+    if [ "${components[0]}" = "all" ]; then
+        components=(cli fdw external-table server automation)
+    fi
+
+    # Handle help
+    if [ "${components[0]}" = "-h" ] || [ "${components[0]}" = "--help" ]; then
+        usage
+        exit 0
+    fi
+
+    echo "Running tests for: ${components[*]}"
+    echo
+
+    # Run requested tests
+    for component in "${components[@]}"; do
+        case "$component" in
+            cli)
+                test_cli || exit_code=1
+                ;;
+            fdw)
+                test_fdw || exit_code=1
+                ;;
+            external-table)
+                test_external_table || exit_code=1
+                ;;
+            server)
+                test_server || exit_code=1
+                ;;
+            automation)
+                test_automation || exit_code=1
+                ;;
+            *)
+                echo "Unknown component: $component"
+                usage
+                exit 1
+                ;;
+        esac
+        echo
+    done
+
+    # Display results
+    display_results || exit_code=$?
+
+    exit $exit_code
+}
+
+main "$@"
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
index 4f8249e9..05e81431 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
@@ -57,38 +57,214 @@ export HIVE_SERVER_PORT=${HIVE_SERVER_PORT:-${HIVE_PORT}}
 # Run health check
 health_check
 
-# Simple wrappers per group
+# Simple wrappers per group - continue on failure to collect all results
 smoke_test() {
-  make GROUP="smoke"
+  echo "[run_tests] Starting GROUP=smoke"
+  make GROUP="smoke" || true
+  save_test_reports "smoke"
   echo "[run_tests] GROUP=smoke finished"
 }
 
 hcatalog_test() {
-  make GROUP="hcatalog"
+  echo "[run_tests] Starting GROUP=hcatalog"
+  make GROUP="hcatalog" || true
+  save_test_reports "hcatalog"
   echo "[run_tests] GROUP=hcatalog finished"
 }
 
 hcfs_test() {
-  make GROUP="hcfs"
+  echo "[run_tests] Starting GROUP=hcfs"
+  make GROUP="hcfs" || true
+  save_test_reports "hcfs"
   echo "[run_tests] GROUP=hcfs finished"
 }
 
 hdfs_test() {
-  make GROUP="hdfs"
+  echo "[run_tests] Starting GROUP=hdfs"
+  make GROUP="hdfs" || true
+  save_test_reports "hdfs"
   echo "[run_tests] GROUP=hdfs finished"
 }
 
 hive_test() {
-  make GROUP="hive"
+  echo "[run_tests] Starting GROUP=hive"
+  make GROUP="hive" || true
+  save_test_reports "hive"
   echo "[run_tests] GROUP=hive finished"
 }
 
+gpdb_test() {
+  echo "[run_tests] Starting GROUP=gpdb"
+  make GROUP="gpdb" || true
+  save_test_reports "gpdb"
+  echo "[run_tests] GROUP=gpdb finished"
+}
+
+# Save test reports for a specific group to avoid overwriting
+save_test_reports() {
+  local group="$1"
+  local surefire_dir="${REPO_ROOT}/automation/target/surefire-reports"
+  local artifacts_dir="${REPO_ROOT}/automation/test_artifacts"
+  local group_dir="${artifacts_dir}/${group}"
+
+  mkdir -p "$group_dir"
+
+  if [ -d "$surefire_dir" ] && [ "$(ls -A "$surefire_dir" 2>/dev/null)" ]; then
+    echo "[run_tests] Saving $group test reports to $group_dir"
+    cp -r "$surefire_dir"/* "$group_dir/" 2>/dev/null || true
+  else
+    echo "[run_tests] No surefire reports found for $group"
+  fi
+}
+
+# Generate test summary from surefire reports
+generate_test_summary() {
+  local artifacts_dir="${REPO_ROOT}/automation/test_artifacts"
+  local summary_file="${artifacts_dir}/test_summary.json"
+
+  mkdir -p "$artifacts_dir"
+
+  echo "=== Generating Test Summary ==="
+
+  local total_tests=0
+  local total_failures=0
+  local total_errors=0
+  local total_skipped=0
+
+  # Statistics by test group
+  declare -A group_stats
+
+  # Read from each test group directory
+  for group_dir in "$artifacts_dir"/*; do
+    [ -d "$group_dir" ] || continue
+    
+    local group=$(basename "$group_dir")
+    # Skip if it's not a test group directory
+    [[ "$group" =~ ^(smoke|hcatalog|hcfs|hdfs|hive|gpdb)$ ]] || continue
+
+    echo "Processing $group test reports from $group_dir"
+    
+    local group_tests=0
+    local group_failures=0
+    local group_errors=0
+    local group_skipped=0
+
+    for xml in "$group_dir"/TEST-*.xml; do
+      [ -f "$xml" ] || continue
+
+      # Extract statistics from XML
+      local tests=$(grep -oP 'tests="\K\d+' "$xml" | head -1 || echo "0")
+      local failures=$(grep -oP 'failures="\K\d+' "$xml" | head -1 || echo "0")
+      local errors=$(grep -oP 'errors="\K\d+' "$xml" | head -1 || echo "0")
+      local skipped=$(grep -oP 'skipped="\K\d+' "$xml" | head -1 || echo "0")
+
+      # Accumulate group statistics
+      group_tests=$((group_tests + tests))
+      group_failures=$((group_failures + failures))
+      group_errors=$((group_errors + errors))
+      group_skipped=$((group_skipped + skipped))
+    done
+
+    # Store group statistics
+    
group_stats[$group]="$group_tests,$group_failures,$group_errors,$group_skipped"
+
+    # Accumulate totals
+    total_tests=$((total_tests + group_tests))
+    total_failures=$((total_failures + group_failures))
+    total_errors=$((total_errors + group_errors))
+    total_skipped=$((total_skipped + group_skipped))
+  done
+
+  local total_failed_cases=$((total_failures + total_errors))
+  local total_passed=$((total_tests - total_failed_cases - total_skipped))
+
+  # Generate JSON report
+  echo "{" > "$summary_file"
+  echo "  \"timestamp\": \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"," >> 
"$summary_file"
+  echo "  \"overall\": {" >> "$summary_file"
+  echo "    \"total\": $total_tests," >> "$summary_file"
+  echo "    \"passed\": $total_passed," >> "$summary_file"
+  echo "    \"failed\": $total_failed_cases," >> "$summary_file"
+  echo "    \"skipped\": $total_skipped" >> "$summary_file"
+  echo "  }," >> "$summary_file"
+  echo "  \"groups\": {" >> "$summary_file"
+
+  local first=true
+  for group in "${!group_stats[@]}"; do
+    IFS=',' read -r g_tests g_failures g_errors g_skipped <<< 
"${group_stats[$group]}"
+    local g_failed=$((g_failures + g_errors))
+    local g_passed=$((g_tests - g_failed - g_skipped))
+
+    if [ "$first" = false ]; then
+      echo "," >> "$summary_file"
+    fi
+
+    echo "    \"$group\": {" >> "$summary_file"
+    echo "      \"total\": $g_tests," >> "$summary_file"
+    echo "      \"passed\": $g_passed," >> "$summary_file"
+    echo "      \"failed\": $g_failed," >> "$summary_file"
+    echo "      \"skipped\": $g_skipped" >> "$summary_file"
+    echo -n "    }" >> "$summary_file"
+    first=false
+  done
+
+  echo "" >> "$summary_file"
+  echo "  }" >> "$summary_file"
+  echo "}" >> "$summary_file"
+
+  # Print summary to console
+  echo
+  echo "=========================================="
+  echo "PXF Automation Test Summary"
+  echo "=========================================="
+  echo "Total Tests: $total_tests"
+  echo "Passed: $total_passed"
+  echo "Failed: $total_failed_cases"
+  echo "Skipped: $total_skipped"
+  echo
+
+  if [ ${#group_stats[@]} -gt 0 ]; then
+    echo "Results by Group:"
+    echo "----------------------------------------"
+    printf "%-12s %6s %6s %6s %6s\n" "Group" "Total" "Pass" "Fail" "Skip"
+    echo "----------------------------------------"
+
+    for group in $(printf '%s\n' "${!group_stats[@]}" | sort); do
+      IFS=',' read -r g_tests g_failures g_errors g_skipped <<< 
"${group_stats[$group]}"
+      local g_failed=$((g_failures + g_errors))
+      local g_passed=$((g_tests - g_failed - g_skipped))
+      printf "%-12s %6d %6d %6d %6d\n" "$group" "$g_tests" "$g_passed" 
"$g_failed" "$g_skipped"
+    done
+    echo "----------------------------------------"
+  fi
+
+  echo "Test summary saved to: $summary_file"
+  echo "=========================================="
+
+  # Return 1 if any tests failed, 0 if all passed
+  if [ $total_failed_cases -gt 0 ]; then
+    echo "Found $total_failed_cases failed test cases"
+    return 1
+  else
+    echo "All tests passed"
+    return 0
+  fi
+}
+
 main() {
+  echo "[run_tests] Running all test groups..."
+
+  # Run all test groups - continue on failure to collect all results
   smoke_test
   hcatalog_test
   hcfs_test
   hdfs_test
   hive_test
+  gpdb_test
+  echo "[run_tests] All test groups completed, generating summary..."
+
+  # Generate test summary and return appropriate exit code
+  generate_test_summary
 }
 
 main "$@"
diff --git a/concourse/singlecluster/bin/gphd-env.sh 
b/concourse/singlecluster/bin/gphd-env.sh
index 7f3f9381..418f3cdc 100755
--- a/concourse/singlecluster/bin/gphd-env.sh
+++ b/concourse/singlecluster/bin/gphd-env.sh
@@ -43,6 +43,7 @@ export HIVE_BIN=${HIVE_ROOT}/bin
 
 export HADOOP_CONF=${HADOOP_ROOT}/etc/hadoop
 export ZOOKEEPER_CONF=${ZOOKEEPER_ROOT}/conf
+export HBASE_HOME=${HBASE_ROOT}
 export HBASE_CONF=${HBASE_ROOT}/conf
 export HIVE_CONF=${HIVE_ROOT}/conf
 export TEZ_CONF=${TEZ_ROOT}/conf
diff --git a/concourse/singlecluster/conf/gphd-conf.sh 
b/concourse/singlecluster/conf/gphd-conf.sh
index 4d9c50c8..fc35cb8c 100755
--- a/concourse/singlecluster/conf/gphd-conf.sh
+++ b/concourse/singlecluster/conf/gphd-conf.sh
@@ -4,7 +4,14 @@ if [ -z "${JAVA_HOME:-}" ]; then
   if [ -n "${JAVA_HADOOP:-}" ]; then
     export JAVA_HOME="${JAVA_HADOOP}"
   else
-    export JAVA_HOME=/usr/lib/jvm/java-8-openjdk
+    # Auto-detect Java 8 path for different architectures
+    if [ -d "/usr/lib/jvm/java-8-openjdk-$(dpkg --print-architecture)" ]; then
+      export JAVA_HOME="/usr/lib/jvm/java-8-openjdk-$(dpkg 
--print-architecture)"
+    elif [ -d "/usr/lib/jvm/java-8-openjdk" ]; then
+      export JAVA_HOME="/usr/lib/jvm/java-8-openjdk"
+    else
+      export JAVA_HOME=$(readlink -f /usr/bin/java | sed 's:/bin/java::')
+    fi
   fi
 fi
 export STORAGE_ROOT=$GPHD_ROOT/storage
@@ -34,7 +41,7 @@ export START_YARN=true
 export START_YARN_HISTORY_SERVER=false
 
 # Automatically start Hive Metastore server
-export START_HIVEMETASTORE=true
+export START_HIVEMETASTORE=false
 
 # Automatically start PXF service
 export START_PXF=true


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to