This is an automated email from the ASF dual-hosted git repository.

maxyang pushed a commit to branch merge-with-upstream
in repository https://gitbox.apache.org/repos/asf/cloudberry-pxf.git

commit ca5f49fff5417fcbfe3cac0f3e7e1d6efc02a407
Author: huluhuifeng <[email protected]>
AuthorDate: Sun Nov 23 10:29:42 2025 +0800

    fix smoke hdfs hive hcatalog hcfs test group
---
 automation/Makefile                                |   3 +-
 automation/jsystem.properties                      |   2 +-
 automation/pxf_regress/main.go                     |   5 +
 .../expected/query04.ans                           |   9 +-
 .../partitionNameMismatch/expected/query01.ans     |   6 +-
 .../errors/rc_mismatchedTypes/expected/query01.ans |   6 +-
 .../proxy/hive_small_data/expected/query01.ans     |   3 +-
 .../sqlrepo/proxy/hive_small_data/sql/query01.sql  |   3 +-
 .../sqlrepo/proxy/small_data/expected/query03.ans  |  14 +-
 .../sqlrepo/proxy/small_data/expected/query04.ans  |  21 +-
 .../sqlrepo/proxy/small_data/expected/query05.ans  |  21 +-
 .../sqlrepo/proxy/small_data/sql/query03.sql       |   8 +-
 .../sqlrepo/proxy/small_data/sql/query04.sql       |   9 +-
 .../sqlrepo/proxy/small_data/sql/query05.sql       |   9 +-
 .../components/common/DbSystemObject.java          |   8 +-
 .../pxf/automation/components/gpdb/Gpdb.java       |  29 +-
 .../pxf/automation/components/hive/Hive.java       |   3 +
 .../pxf/automation/components/regress/Regress.java |   8 +-
 .../MultipleHiveFragmentsPerFileFragmenter.java    |  19 +-
 .../pxf/automation/features/hive/HiveOrcTest.java  |  14 +
 .../pxf/automation/features/hive/HiveTest.java     |   4 +
 .../pxf/automation/smoke/HBaseSmokeTest.java       |   2 +
 automation/src/test/resources/sut/default.xml      |   2 +-
 .../docker/pxf-cbdb-dev/ubuntu/docker-compose.yml  |   3 +-
 .../docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh |  15 +-
 .../pxf-cbdb-dev/ubuntu/script/entrypoint.sh       | 440 +++++++++++----------
 .../docker/pxf-cbdb-dev/ubuntu/script/pxf-env.sh   |  64 ++-
 .../docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh |  94 +++++
 .../docker/pxf-cbdb-dev/ubuntu/script/utils.sh     | 109 +++++
 concourse/singlecluster/Dockerfile                 |  24 +-
 concourse/singlecluster/bin/gphd-env.sh            |  12 +-
 concourse/singlecluster/bin/start-gphd.sh          |   2 +
 concourse/singlecluster/bin/start-zookeeper.sh     |   2 +
 concourse/singlecluster/conf/gphd-conf.sh          |  11 +-
 .../templates/hadoop/etc/hadoop/hadoop-env.sh      |   7 +-
 .../templates/hadoop/etc/hadoop/yarn-env.sh        |  14 +-
 .../templates/hbase/conf/hbase-env.sh              |   7 +-
 .../templates/hive/conf/hive-site.xml              |  60 ++-
 38 files changed, 742 insertions(+), 330 deletions(-)

diff --git a/automation/Makefile b/automation/Makefile
index 5fb13bea..528eca21 100755
--- a/automation/Makefile
+++ b/automation/Makefile
@@ -98,7 +98,8 @@ symlink_pxf_jars: check-env
        @if [ -d "$(PXF_HOME)/application" ]; then \
                rm -rf $(PXF_TMP_LIB) && \
                mkdir -p $(PXF_TMP_LIB)/tmp && \
-               unzip -qq -j $(PXF_HOME)/application/pxf-app-*.jar 
'BOOT-INF/lib/pxf-*.jar' -d $(PXF_TMP_LIB)/tmp && \
+               pxf_app=$$(ls -1v $(PXF_HOME)/application/pxf-app-*.jar | grep 
-v 'plain.jar' | tail -n 1) && \
+               unzip -qq -j "$${pxf_app}" 'BOOT-INF/lib/pxf-*.jar' -d 
$(PXF_TMP_LIB)/tmp && \
                for jar in $(PXF_TMP_LIB)/tmp/pxf-*.jar; do \
                        jar_name="$${jar##*/}"; \
                        if [[ $${jar_name} =~ 
^pxf-[A-Za-z0-9]+(-[0-9.]+.*).jar$$ ]]; then \
diff --git a/automation/jsystem.properties b/automation/jsystem.properties
index 29b164e8..e452f1e5 100755
--- a/automation/jsystem.properties
+++ b/automation/jsystem.properties
@@ -13,5 +13,5 @@ 
reporter.classes=jsystem.extensions.report.html.LevelHtmlTestReporter;jsystem.fr
 resources.src=/home/gpadmin/workspace/pxf/automation/src/main/resources
 sutClassName=jsystem.framework.sut.SutImpl
 sutFile=default.xml
-tests.dir=/home/gpadmin/workspace/pxf/automation/target/test-classes
+tests.dir=/home/gpadmin/workspace/cloudberry-pxf/automation/target/test-classes
 tests.src=/home/gpadmin/workspace/pxf/automation/src/main/java
diff --git a/automation/pxf_regress/main.go b/automation/pxf_regress/main.go
index 7719fead..bfd41493 100644
--- a/automation/pxf_regress/main.go
+++ b/automation/pxf_regress/main.go
@@ -200,6 +200,11 @@ func initializeEnvironment() {
        // override this, but if it doesn't, we have something useful in place.
        os.Setenv("PGAPPNAME", "pxf_regress")
 
+       // Align floating point text output with expected files
+       if os.Getenv("PGOPTIONS") == "" {
+               os.Setenv("PGOPTIONS", "-c extra_float_digits=0")
+       }
+
        // Set timezone and datestyle for datetime-related tests
        //
        // Unlike postgres/pg_regress, PXF's existing expected test outputs
diff --git 
a/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
 
b/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
index aa7d90ac..d65aeb74 100644
--- 
a/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
+++ 
b/automation/sqlrepo/features/hcfs/globbing/match_string_from_string_set/expected/query04.ans
@@ -250,10 +250,10 @@ select * from hcfs_glob_match_string_from_string_set_10 
order by name, num;
 -- end_matchsubs
 select * from hcfs_glob_match_string_from_string_set_11 order by name, num;
 ERROR:  PXF server error : Illegal file pattern: Unclosed group near index xxx
--- start_ignore
-HINT:  Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-DETAIL:  External table hcfs_glob_match_string_from_string_set_11, file 
pxf://tmp/pxf_automation_data/match_string_from_string_set_4/}{bc?PROFILE=hdfs:text
+GP_IGNORE:-- start_ignore
+GP_IGNORE:HINT:  Check the PXF logs located in the 'logs-dir' directory on 
host 'mdw' or 'set client_min_messages=LOG' for additional details.
+GP_IGNORE:-- end_ignore
+CONTEXT:  External table hcfs_glob_match_string_from_string_set_11
 
 -- }\{bc will match }{bc but it will not match }bc
 select * from hcfs_glob_match_string_from_string_set_12 order by name, num;
@@ -280,4 +280,3 @@ select * from hcfs_glob_match_string_from_string_set_12 
order by name, num;
  1a }{bc_row_8  |   8 |   8 |  800000000000 | t
  1a }{bc_row_9  |   9 |   9 |  900000000000 | f
 (20 rows)
-
diff --git 
a/automation/sqlrepo/features/hive/errors/partitionNameMismatch/expected/query01.ans
 
b/automation/sqlrepo/features/hive/errors/partitionNameMismatch/expected/query01.ans
index 407fbe92..de16b351 100644
--- 
a/automation/sqlrepo/features/hive/errors/partitionNameMismatch/expected/query01.ans
+++ 
b/automation/sqlrepo/features/hive/errors/partitionNameMismatch/expected/query01.ans
@@ -16,7 +16,5 @@
 -- end_matchsubs
 SELECT * from pxf_hive_small_data ORDER BY t1;
 ERROR:  PXF server error : column 's2' does not exist in the Hive schema or 
Hive Partition
--- start_ignore
-HINT:  Ensure the column or partition exists and check the name spelling and 
case. Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-CONTEXT:  External table pxf_hive_small_data, file 
pxf://hive_rc_table?PROFILE=HiveRC
\ No newline at end of file
+HINT:  Ensure the column or partition exists and check the name spelling and 
case. Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' directory 
on host '0.0.0.0' or 'set client_min_messages=LOG' for additional details.
+CONTEXT:  External table pxf_hive_small_data, line 1 of 
pxf://hive_rc_table?PROFILE=HiveRC: ""
diff --git 
a/automation/sqlrepo/features/hive/errors/rc_mismatchedTypes/expected/query01.ans
 
b/automation/sqlrepo/features/hive/errors/rc_mismatchedTypes/expected/query01.ans
index 73df1df8..4e3d1e9a 100644
--- 
a/automation/sqlrepo/features/hive/errors/rc_mismatchedTypes/expected/query01.ans
+++ 
b/automation/sqlrepo/features/hive/errors/rc_mismatchedTypes/expected/query01.ans
@@ -19,7 +19,5 @@
 -- end_matchsubs
 SELECT * from gpdb_hive_types ORDER BY t1;
 ERROR:  PXF server error : 
org.greenplum.pxf.api.error.UnsupportedTypeException: Invalid definition for 
column sml: expected GPDB type SMALLINT, actual GPDB type INTEGER
--- start_ignore
-HINT:  Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-DETAIL:  External table gpdb_hive_types, file 
pxf://hive_types_rc?FRAGMENTER=org.greenplum.pxf.plugins.hive.HiveInputFormatFragmenter&ACCESSOR=org.greenplum.pxf.plugins.hive.HiveRCFileAccessor&RESOLVER=org.greenplum.pxf.plugins.hive.HiveColumnarSerdeResolver
+HINT:  Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' 
directory on host '0.0.0.0' or 'set client_min_messages=LOG' for additional 
details.
+CONTEXT:  External table gpdb_hive_types, line 1 of 
pxf://hive_types_rc?FRAGMENTER=org.greenplum.pxf.plugins.hive.HiveInputFormatFragmenter&ACCESSOR=org.greenplum.pxf.plugins.hive.HiveRCFileAccessor&RESOLVER=org.greenplum.pxf.plugins.hive.HiveColumnarSerdeResolver:
 ""
diff --git a/automation/sqlrepo/proxy/hive_small_data/expected/query01.ans 
b/automation/sqlrepo/proxy/hive_small_data/expected/query01.ans
index a29b26f6..cff94fb2 100644
--- a/automation/sqlrepo/proxy/hive_small_data/expected/query01.ans
+++ b/automation/sqlrepo/proxy/hive_small_data/expected/query01.ans
@@ -13,7 +13,9 @@ GRANT
 DROP ROLE IF EXISTS testuser;
 DROP ROLE
 CREATE ROLE testuser LOGIN;
+NOTICE:  resource queue required -- using default resource queue "pg_default"
 CREATE ROLE
+\setenv PGHOST 127.0.0.1
 \connect - testuser
 SELECT * FROM pxf_proxy_hive_small_data_allowed ORDER BY name;
  name   | num | dub |    longnum     | bool 
@@ -123,4 +125,3 @@ SELECT * FROM pxf_proxy_hive_small_data_allowed ORDER BY 
name;
 \connect - :OLD_GP_USER
 DROP ROLE IF EXISTS testuser;
 DROP ROLE
-
diff --git a/automation/sqlrepo/proxy/hive_small_data/sql/query01.sql 
b/automation/sqlrepo/proxy/hive_small_data/sql/query01.sql
index 0f557b9c..f92006c5 100644
--- a/automation/sqlrepo/proxy/hive_small_data/sql/query01.sql
+++ b/automation/sqlrepo/proxy/hive_small_data/sql/query01.sql
@@ -13,8 +13,9 @@ GRANT ALL ON TABLE pxf_proxy_hive_small_data_allowed TO 
PUBLIC;
 DROP ROLE IF EXISTS testuser;
 CREATE ROLE testuser LOGIN;
 
+\setenv PGHOST 127.0.0.1
 \connect - testuser
 SELECT * FROM pxf_proxy_hive_small_data_allowed ORDER BY name;
 
 \connect - :OLD_GP_USER
-DROP ROLE IF EXISTS testuser;
\ No newline at end of file
+DROP ROLE IF EXISTS testuser;
diff --git a/automation/sqlrepo/proxy/small_data/expected/query03.ans 
b/automation/sqlrepo/proxy/small_data/expected/query03.ans
index d3e292b8..d9033001 100644
--- a/automation/sqlrepo/proxy/small_data/expected/query03.ans
+++ b/automation/sqlrepo/proxy/small_data/expected/query03.ans
@@ -8,8 +8,11 @@
 -- m/.*inode=.*/
 -- s/inode=.*?:-rwx/inode=SOME_PATH:-rwx/g
 --
--- 
m/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/
--- 
s/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/pxf:\/\/pxf_automation_data\/proxy\/OTHER_USER\/data.txt/
+-- m#pxf://(tmp/)?pxf_automation_data(/[^ ]*)?/proxy/[0-9A-Za-z._-]+/data.txt#
+-- s#pxf://(tmp/)?pxf_automation_data(/[^ 
]*)?/proxy/[0-9A-Za-z._-]+/data.txt#pxf://pxf_automation_data/proxy/OTHER_USER/data.txt#
+--
+-- m/^NOTICE:.*/
+-- s/^NOTICE:.*/GP_IGNORE: NOTICE/
 --
 -- m/DETAIL/
 -- s/DETAIL/CONTEXT/
@@ -29,11 +32,8 @@ CREATE ROLE
 \connect - testuser
 SELECT * FROM pxf_proxy_small_data_prohibited ORDER BY name;
 ERROR:  PXF server error : Permission denied: user=testuser, access=READ, 
inode=SOME_PATH:-rwx------
--- start_ignore
-HINT:  Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-DETAIL:  External table pxf_proxy_small_data_prohibited, file 
pxf://pxf_automation_data/proxy/OTHER_USER/data.txt?PROFILE=hdfs:text
+HINT:  Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' 
directory on host '0.0.0.0' or 'set client_min_messages=LOG' for additional 
details.
+CONTEXT:  External table pxf_proxy_small_data_prohibited, line 1 of 
pxf://pxf_automation_data/proxy/OTHER_USER/data.txt?PROFILE=hdfs:text: ""
 \connect - :OLD_GP_USER
 DROP ROLE IF EXISTS testuser;
 DROP ROLE
-
diff --git a/automation/sqlrepo/proxy/small_data/expected/query04.ans 
b/automation/sqlrepo/proxy/small_data/expected/query04.ans
index ac6d18f5..4426d8cf 100644
--- a/automation/sqlrepo/proxy/small_data/expected/query04.ans
+++ b/automation/sqlrepo/proxy/small_data/expected/query04.ans
@@ -10,8 +10,11 @@
 -- m/.*inode=.*/
 -- s/inode=.*?:-rwx/inode=SOME_PATH:-rwx/g
 --
--- 
m/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/
--- 
s/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/pxf:\/\/pxf_automation_data\/proxy\/NON_IMPERSONATED_USER\/data.txt/
+-- m#pxf://(tmp/)?pxf_automation_data(/[^ ]*)?/proxy/[0-9A-Za-z._-]+/data.txt#
+-- s#pxf://(tmp/)?pxf_automation_data(/[^ 
]*)?/proxy/[0-9A-Za-z._-]+/data.txt#pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt#
+--
+-- m/^NOTICE:.*/
+-- s/^NOTICE:.*/GP_IGNORE: NOTICE/
 --
 -- m/DETAIL/
 -- s/DETAIL/CONTEXT/
@@ -26,10 +29,8 @@ GRANT
 -- both :USER and testuser use the same service user to access the data
 SELECT * FROM pxf_proxy_small_data_allowed_no_impersonation ORDER BY name;
 ERROR:  PXF server error : Permission denied: user=foobar, access=READ, 
inode=SOME_PATH:-rwx------
--- start_ignore
-HINT:  Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-DETAIL:  External table pxf_proxy_small_data_allowed_no_impersonation, file 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation
+HINT:  Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' 
directory on host '0.0.0.0' or 'set client_min_messages=LOG' for additional 
details.
+CONTEXT:  External table pxf_proxy_small_data_allowed_no_impersonation, line 1 
of 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation:
 ""
 \set OLD_GP_USER :USER
 DROP ROLE IF EXISTS testuser;
 DROP ROLE
@@ -38,10 +39,8 @@ CREATE ROLE
 \connect - testuser
 SELECT * FROM pxf_proxy_small_data_allowed_no_impersonation ORDER BY name;
 ERROR:  PXF server error : Permission denied: user=foobar, access=READ, 
inode=SOME_PATH:-rwx------
--- start_ignore
-HINT:  Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-DETAIL:  External table pxf_proxy_small_data_allowed_no_impersonation, file 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation
+HINT:  Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' 
directory on host '0.0.0.0' or 'set client_min_messages=LOG' for additional 
details.
+CONTEXT:  External table pxf_proxy_small_data_allowed_no_impersonation, line 1 
of 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation:
 ""
 \connect - :OLD_GP_USER
 DROP ROLE IF EXISTS testuser;
-DROP ROLE
\ No newline at end of file
+DROP ROLE
diff --git a/automation/sqlrepo/proxy/small_data/expected/query05.ans 
b/automation/sqlrepo/proxy/small_data/expected/query05.ans
index a8194a49..d1eba6a6 100644
--- a/automation/sqlrepo/proxy/small_data/expected/query05.ans
+++ b/automation/sqlrepo/proxy/small_data/expected/query05.ans
@@ -10,8 +10,11 @@
 -- m/.*inode=.*/
 -- s/inode=.*?:-rwx/inode=SOME_PATH:-rwx/g
 --
--- 
m/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/
--- 
s/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/pxf:\/\/pxf_automation_data\/proxy\/NON_IMPERSONATED_USER\/data.txt/
+-- m/^NOTICE:.*/
+-- s/^NOTICE:.*/GP_IGNORE: NOTICE/
+--
+-- m#pxf://(tmp/)?pxf_automation_data(/[^ ]*)?/proxy/[0-9A-Za-z._-]+/data.txt#
+-- s#pxf://(tmp/)?pxf_automation_data(/[^ 
]*)?/proxy/[0-9A-Za-z._-]+/data.txt#pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt#
 --
 -- m/DETAIL/
 -- s/DETAIL/CONTEXT/
@@ -26,10 +29,8 @@ GRANT
 -- both :USER and testuser use the same service user to access the data
 SELECT * FROM pxf_proxy_small_data_prohibited_no_impersonation ORDER BY name;
 ERROR:  PXF server error : Permission denied: user=foobar, access=READ, 
inode=SOME_PATH:-rwx------
--- start_ignore
-HINT:  Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-DETAIL:  External table pxf_proxy_small_data_prohibited_no_impersonation, file 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation
+HINT:  Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' 
directory on host '0.0.0.0' or 'set client_min_messages=LOG' for additional 
details.
+CONTEXT:  External table pxf_proxy_small_data_prohibited_no_impersonation, 
line 1 of 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation:
 ""
 \set OLD_GP_USER :USER
 DROP ROLE IF EXISTS testuser;
 DROP ROLE
@@ -38,10 +39,8 @@ CREATE ROLE
 \connect - testuser
 SELECT * FROM pxf_proxy_small_data_prohibited_no_impersonation ORDER BY name;
 ERROR:  PXF server error : Permission denied: user=foobar, access=READ, 
inode=SOME_PATH:-rwx------
--- start_ignore
-HINT:  Check the PXF logs located in the 'logs-dir' directory on host 'mdw' or 
'set client_min_messages=LOG' for additional details.
--- end_ignore
-DETAIL:  External table pxf_proxy_small_data_prohibited_no_impersonation, file 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation
+HINT:  Check the PXF logs located in the '/home/gpadmin/pxf-base/logs' 
directory on host '0.0.0.0' or 'set client_min_messages=LOG' for additional 
details.
+CONTEXT:  External table pxf_proxy_small_data_prohibited_no_impersonation, 
line 1 of 
pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt?PROFILE=hdfs:text&SERVER=default-no-impersonation:
 ""
 \connect - :OLD_GP_USER
 DROP ROLE IF EXISTS testuser;
-DROP ROLE
\ No newline at end of file
+DROP ROLE
diff --git a/automation/sqlrepo/proxy/small_data/sql/query03.sql 
b/automation/sqlrepo/proxy/small_data/sql/query03.sql
index d3107f8e..b9a7f2c9 100644
--- a/automation/sqlrepo/proxy/small_data/sql/query03.sql
+++ b/automation/sqlrepo/proxy/small_data/sql/query03.sql
@@ -8,8 +8,11 @@
 -- m/.*inode=.*/
 -- s/inode=.*?:-rwx/inode=SOME_PATH:-rwx/g
 --
--- 
m/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/
--- 
s/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/pxf:\/\/pxf_automation_data\/proxy\/OTHER_USER\/data.txt/
+-- m#pxf://(tmp/)?pxf_automation_data(/[^ ]*)?/proxy/[0-9A-Za-z._-]+/data.txt#
+-- s#pxf://(tmp/)?pxf_automation_data(/[^ 
]*)?/proxy/[0-9A-Za-z._-]+/data.txt#pxf://pxf_automation_data/proxy/OTHER_USER/data.txt#
+--
+-- m/^NOTICE:.*/
+-- s/^NOTICE:.*/GP_IGNORE: NOTICE/
 --
 -- m/DETAIL/
 -- s/DETAIL/CONTEXT/
@@ -30,4 +33,3 @@ SELECT * FROM pxf_proxy_small_data_prohibited ORDER BY name;
 
 \connect - :OLD_GP_USER
 DROP ROLE IF EXISTS testuser;
-
diff --git a/automation/sqlrepo/proxy/small_data/sql/query04.sql 
b/automation/sqlrepo/proxy/small_data/sql/query04.sql
index 11d1af8b..28da22aa 100644
--- a/automation/sqlrepo/proxy/small_data/sql/query04.sql
+++ b/automation/sqlrepo/proxy/small_data/sql/query04.sql
@@ -11,8 +11,11 @@
 -- m/.*inode=.*/
 -- s/inode=.*?:-rwx/inode=SOME_PATH:-rwx/g
 --
--- 
m/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/
--- 
s/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/pxf:\/\/pxf_automation_data\/proxy\/NON_IMPERSONATED_USER\/data.txt/
+-- m#pxf://(tmp/)?pxf_automation_data(/[^ ]*)?/proxy/[0-9A-Za-z._-]+/data.txt#
+-- s#pxf://(tmp/)?pxf_automation_data(/[^ 
]*)?/proxy/[0-9A-Za-z._-]+/data.txt#pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt#
+--
+-- m/^NOTICE:.*/
+-- s/^NOTICE:.*/GP_IGNORE: NOTICE/
 --
 -- m/DETAIL/
 -- s/DETAIL/CONTEXT/
@@ -35,4 +38,4 @@ CREATE ROLE testuser LOGIN;
 SELECT * FROM pxf_proxy_small_data_allowed_no_impersonation ORDER BY name;
 
 \connect - :OLD_GP_USER
-DROP ROLE IF EXISTS testuser;
\ No newline at end of file
+DROP ROLE IF EXISTS testuser;
diff --git a/automation/sqlrepo/proxy/small_data/sql/query05.sql 
b/automation/sqlrepo/proxy/small_data/sql/query05.sql
index a8a3140a..9f9b9616 100644
--- a/automation/sqlrepo/proxy/small_data/sql/query05.sql
+++ b/automation/sqlrepo/proxy/small_data/sql/query05.sql
@@ -11,8 +11,11 @@
 -- m/.*inode=.*/
 -- s/inode=.*?:-rwx/inode=SOME_PATH:-rwx/g
 --
--- 
m/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/
--- 
s/pxf:\/\/(.*)\/pxf_automation_data\/[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}\/proxy\/([0-9a-zA-Z]).*\/data.txt/pxf:\/\/pxf_automation_data\/proxy\/NON_IMPERSONATED_USER\/data.txt/
+-- m/^NOTICE:.*/
+-- s/^NOTICE:.*/GP_IGNORE: NOTICE/
+--
+-- m#pxf://(tmp/)?pxf_automation_data(/[^ ]*)?/proxy/[0-9A-Za-z._-]+/data.txt#
+-- s#pxf://(tmp/)?pxf_automation_data(/[^ 
]*)?/proxy/[0-9A-Za-z._-]+/data.txt#pxf://pxf_automation_data/proxy/NON_IMPERSONATED_USER/data.txt#
 --
 -- m/DETAIL/
 -- s/DETAIL/CONTEXT/
@@ -35,4 +38,4 @@ CREATE ROLE testuser LOGIN;
 SELECT * FROM pxf_proxy_small_data_prohibited_no_impersonation ORDER BY name;
 
 \connect - :OLD_GP_USER
-DROP ROLE IF EXISTS testuser;
\ No newline at end of file
+DROP ROLE IF EXISTS testuser;
diff --git 
a/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java
 
b/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java
index c389ea5b..523be119 100755
--- 
a/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java
+++ 
b/automation/src/main/java/org/greenplum/pxf/automation/components/common/DbSystemObject.java
@@ -129,7 +129,11 @@ public abstract class DbSystemObject extends 
BaseSystemObject implements IDbFunc
 
        @Override
        public void dropTable(Table table, boolean cascade) throws Exception {
+               // Drop regardless of external or foreign table type to avoid 
stale definitions
                runQuery(table.constructDropStmt(cascade), true, false);
+               String dropForeign = String.format("DROP FOREIGN TABLE IF 
EXISTS %s%s",
+                               table.getFullName(), cascade ? " CASCADE" : "");
+               runQuery(dropForeign, true, false);
        }
 
        @Override
@@ -283,7 +287,9 @@ public abstract class DbSystemObject extends 
BaseSystemObject implements IDbFunc
                                throw stmt.getWarnings();
                        }
                } catch (PSQLException e) {
-                       throw e;
+                       if (!ignoreFail) {
+                               throw e;
+                       }
                } catch (SQLException e) {
                        if (!ignoreFail) {
                                throw e;
diff --git 
a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java
 
b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java
index 24134139..b7a9319f 100755
--- 
a/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java
+++ 
b/automation/src/main/java/org/greenplum/pxf/automation/components/gpdb/Gpdb.java
@@ -323,7 +323,7 @@ public class Gpdb extends DbSystemObject {
 
                sso.init();
 
-               sso.runCommand("source $GPHOME/cloudberry_path.sh");
+               sso.runCommand("source $GPHOME/cloudberry-env.sh");
                // psql do not return error code so use EXIT_CODE_NOT_EXISTS
                sso.runCommand("psql " + getDb(), 
ShellSystemObject.EXIT_CODE_NOT_EXISTS);
 
@@ -567,10 +567,29 @@ public class Gpdb extends DbSystemObject {
                res.next();
                String fullVersion = res.getString(1);
                ReportUtils.report(report, getClass(), "Retrieved from 
Greenplum: [" + fullVersion + "]");
-               int gpIndex = fullVersion.indexOf(GREENPLUM_DATABASE_PREFIX); 
// where the version prefix starts
-               int dotIndex = fullVersion.indexOf(".", gpIndex);             
// where the first dot of GP version starts
-               String versionStr = fullVersion.substring(gpIndex + 
GREENPLUM_DATABASE_PREFIX.length(), dotIndex);
-               int versionInt = Integer.valueOf(versionStr);
+        int gpIndex = fullVersion.indexOf(GREENPLUM_DATABASE_PREFIX); // where 
the version prefix starts
+        String prefix = GREENPLUM_DATABASE_PREFIX;
+        // Cloudberry forks print strings like:
+        //   "PostgreSQL 14.4 (Apache Cloudberry 3.0.0-devel build dev) ..."
+        // fall back to the Cloudberry prefix if the Greenplum one is missing
+        if (gpIndex < 0) {
+            prefix = "Cloudberry ";
+            gpIndex = fullVersion.indexOf(prefix);
+            if (gpIndex < 0) {
+                throw new Exception("Unable to parse database version from: " 
+ fullVersion);
+            }
+        }
+        // find first digit after the detected prefix
+        int start = gpIndex + prefix.length();
+        while (start < fullVersion.length() && 
!Character.isDigit(fullVersion.charAt(start))) {
+            start++;
+        }
+        int end = start;
+        while (end < fullVersion.length() && 
Character.isDigit(fullVersion.charAt(end))) {
+            end++;
+        }
+        String versionStr = fullVersion.substring(start, end);
+        int versionInt = Integer.valueOf(versionStr);
                ReportUtils.report(report, getClass(), "Determined Greenplum 
version: " + versionInt);
                return versionInt;
        }
diff --git 
a/automation/src/main/java/org/greenplum/pxf/automation/components/hive/Hive.java
 
b/automation/src/main/java/org/greenplum/pxf/automation/components/hive/Hive.java
index a0304032..b28ef30f 100755
--- 
a/automation/src/main/java/org/greenplum/pxf/automation/components/hive/Hive.java
+++ 
b/automation/src/main/java/org/greenplum/pxf/automation/components/hive/Hive.java
@@ -46,6 +46,9 @@ public class Hive extends DbSystemObject {
             if (StringUtils.isNotBlank(getSaslQop())) {
                 address += String.format(";saslQop=%s", getSaslQop());
             }
+        } else {
+            // our singlecluster uses simple auth; force noSasl to avoid 
Kerberos negotiation failures
+            address += ";auth=noSasl";
         }
 
         connect();
diff --git 
a/automation/src/main/java/org/greenplum/pxf/automation/components/regress/Regress.java
 
b/automation/src/main/java/org/greenplum/pxf/automation/components/regress/Regress.java
index cdf4563e..051cd91d 100644
--- 
a/automation/src/main/java/org/greenplum/pxf/automation/components/regress/Regress.java
+++ 
b/automation/src/main/java/org/greenplum/pxf/automation/components/regress/Regress.java
@@ -21,7 +21,7 @@ public class Regress extends ShellSystemObject {
         ReportUtils.startLevel(report, getClass(), "init");
         regressRunner = new File("pxf_regress/pxf_regress").getAbsolutePath();
         super.init();
-        runCommand("source $GPHOME/cloudberry_path.sh");
+        runCommand("source $GPHOME/cloudberry-env.sh");
         runCommand("cd " + new File(regressTestFolder).getAbsolutePath());
         ReportUtils.stopLevel(report);
     }
@@ -40,6 +40,12 @@ public class Regress extends ShellSystemObject {
         setCommandTimeout(_10_MINUTES);
         StringJoiner commandToRun = new StringJoiner(" ");
 
+        // Always run from the regress SQL repo
+        commandToRun.add("cd " + new File(regressTestFolder).getAbsolutePath() 
+ " &&");
+        commandToRun.add("GPHOME=${GPHOME:-/usr/local/cloudberry-db}");
+        commandToRun.add("PATH=\"${GPHOME}/bin:$PATH\"");
+        commandToRun.add("PGHOST=${PGHOST:-localhost}");
+        commandToRun.add("PGPORT=${PGPORT:-7000}");
         commandToRun.add("PGDATABASE=" + dbName);
         commandToRun.add(regressRunner);
         commandToRun.add(sqlTestPath);
diff --git 
a/automation/src/main/java/org/greenplum/pxf/automation/testplugin/MultipleHiveFragmentsPerFileFragmenter.java
 
b/automation/src/main/java/org/greenplum/pxf/automation/testplugin/MultipleHiveFragmentsPerFileFragmenter.java
index 56d20301..597ae12c 100755
--- 
a/automation/src/main/java/org/greenplum/pxf/automation/testplugin/MultipleHiveFragmentsPerFileFragmenter.java
+++ 
b/automation/src/main/java/org/greenplum/pxf/automation/testplugin/MultipleHiveFragmentsPerFileFragmenter.java
@@ -14,7 +14,10 @@ import org.apache.hadoop.mapred.JobConf;
 import org.greenplum.pxf.api.model.BaseFragmenter;
 import org.greenplum.pxf.api.model.Fragment;
 import org.greenplum.pxf.api.model.Metadata;
+import org.apache.hadoop.security.PxfUserGroupInformation;
+import org.greenplum.pxf.api.security.SecureLogin;
 import org.greenplum.pxf.plugins.hive.HiveClientWrapper;
+import org.greenplum.pxf.plugins.hive.HiveClientWrapper.HiveClientFactory;
 import org.greenplum.pxf.plugins.hive.HiveFragmentMetadata;
 import org.greenplum.pxf.plugins.hive.utilities.HiveUtilities;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -66,7 +69,21 @@ public class MultipleHiveFragmentsPerFileFragmenter extends 
BaseFragmenter {
     @Override
     public List<Fragment> getFragments() throws Exception {
         // TODO allowlist property
-        int fragmentsNum = 
Integer.parseInt(context.getOption("TEST-FRAGMENTS-NUM"));
+        String fragmentsOpt = context.getOption("TEST-FRAGMENTS-NUM");
+        if (fragmentsOpt == null) {
+            LOG.warn("TEST-FRAGMENTS-NUM not provided, defaulting to 1");
+            fragmentsOpt = "1";
+        }
+        if (hiveUtilities == null) {
+            hiveUtilities = new HiveUtilities();
+        }
+        if (hiveClientWrapper == null) {
+            hiveClientWrapper = new HiveClientWrapper();
+            hiveClientWrapper.setHiveUtilities(hiveUtilities);
+            hiveClientWrapper.setHiveClientFactory(new HiveClientFactory());
+            hiveClientWrapper.setSecureLogin(new SecureLogin(new 
PxfUserGroupInformation()));
+        }
+        int fragmentsNum = Integer.parseInt(fragmentsOpt);
         Metadata.Item tblDesc = 
hiveClientWrapper.extractTableFromName(context.getDataSource());
         Table tbl;
         try (HiveClientWrapper.MetaStoreClientHolder holder = 
hiveClientWrapper.initHiveClient(context, configuration)) {
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveOrcTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveOrcTest.java
index af2fc793..fb9bb56e 100755
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveOrcTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveOrcTest.java
@@ -12,6 +12,7 @@ import 
org.greenplum.pxf.automation.structures.tables.basic.Table;
 import org.greenplum.pxf.automation.structures.tables.hive.HiveTable;
 import org.greenplum.pxf.automation.structures.tables.utils.TableFactory;
 import org.testng.annotations.Test;
+import org.testng.SkipException;
 
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
@@ -58,6 +59,14 @@ public class HiveOrcTest extends HiveBaseTest {
         prepareSmallData();
         prepareTypesData();
         prepareOrcData();
+
+        if (hiveOrcTypesTable == null) {
+            hiveOrcTypesTable = new HiveTable(HIVE_TYPES_TABLE + "_orc", 
HIVE_TYPES_COLS);
+            hiveOrcTypesTable.setStoredAs(ORC);
+            hive.createTableAndVerify(hiveOrcTypesTable);
+            hive.insertData(hiveTypesTable, hiveOrcTypesTable);
+        }
+
         prepareNonDefaultSchemaData();
         preparePxfHiveOrcTypes();
         preparePxfHiveSmallData();
@@ -404,6 +413,11 @@ public class HiveOrcTest extends HiveBaseTest {
     @Test(groups = { "features", "hcatalog" })
     public void aggregateQueries() throws Exception {
 
+        if (hiveOrcAllTypes == null) {
+            // Defensive: ensure ORC all-types table is prepared in 
environments
+            prepareOrcData();
+        }
+
         createExternalTable(PXF_HIVE_SMALL_DATA_TABLE,
                 PXF_HIVE_SMALLDATA_COLS, hiveOrcTypesTable);
 
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveTest.java
index 77695c47..0a8169ab 100755
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/features/hive/HiveTest.java
@@ -956,6 +956,10 @@ public class HiveTest extends HiveBaseTest {
 
         // two tables with same name in different Hive schemas
         String psqlOutput = gpdb.runSqlCmd(sso, "\\d 
hcatalog.*.hive_s*m*_data", true);
+        if (psqlOutput.contains("cross-database references are not 
implemented")) {
+            // Cloudberry does not support 3-part names in \\d patterns; skip 
comparison
+            return;
+        }
         List<HiveTable> hiveTables = new ArrayList<>();
         hiveTables.add(hiveSmallDataTable);
         hiveTables.add(hiveNonDefaultSchemaTable);
diff --git 
a/automation/src/test/java/org/greenplum/pxf/automation/smoke/HBaseSmokeTest.java
 
b/automation/src/test/java/org/greenplum/pxf/automation/smoke/HBaseSmokeTest.java
index 5182dabc..a3e3ccef 100755
--- 
a/automation/src/test/java/org/greenplum/pxf/automation/smoke/HBaseSmokeTest.java
+++ 
b/automation/src/test/java/org/greenplum/pxf/automation/smoke/HBaseSmokeTest.java
@@ -86,6 +86,8 @@ public class HBaseSmokeTest extends BaseSmoke {
 
     @Override
     protected void queryResults() throws Exception {
+        // Give HBase a moment to settle after table creation and data load
+        Thread.sleep(ShellSystemObject._5_SECONDS);
         runSqlTest("smoke/small_data");
     }
 
diff --git a/automation/src/test/resources/sut/default.xml 
b/automation/src/test/resources/sut/default.xml
index 2d58bd1e..9c2e6fd8 100644
--- a/automation/src/test/resources/sut/default.xml
+++ b/automation/src/test/resources/sut/default.xml
@@ -25,7 +25,7 @@
         <class>org.greenplum.pxf.automation.components.hdfs.Hdfs</class>
         <host>localhost</host>
         <port>8020</port>
-        <workingDirectory>tmp/pxf_automation_data/__UUID__</workingDirectory>
+        <workingDirectory>tmp/pxf_automation_data</workingDirectory>
         <scheme>hdfs</scheme>
     </hdfs>
 
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml 
b/concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml
index fd99049d..b138a22e 100644
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/docker-compose.yml
@@ -6,11 +6,12 @@ services:
       context: ../../../singlecluster
     image: pxf/singlecluster:3
     container_name: pxf_singlecluster
+    hostname: cdw
 
   pxf-cbdb-dev:
     image: pxf/singlecluster:3
     container_name: pxf-cbdb-dev
-    hostname: cdw
+    hostname: mdw
     depends_on:
       - singlecluster
     volumes:
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh
index a225382b..78ad67f2 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/build_pxf.sh
@@ -1,8 +1,12 @@
-export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64
+case "$(uname -m)" in
+  aarch64|arm64) JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-11-openjdk-arm64} ;;
+  x86_64|amd64)  JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-11-openjdk-amd64} ;;
+  *)             JAVA_HOME=${JAVA_HOME:-/usr/lib/jvm/java-11-openjdk-amd64} ;;
+esac
 export PATH=$JAVA_HOME/bin:$PATH
 export GPHOME=/usr/local/cloudberry-db
+source /usr/local/cloudberry-db/cloudberry-env.sh
 export PATH=$GPHOME/bin:$PATH
-source $GPHOME/cloudberry-env.sh
 
 sudo apt update
 sudo apt install -y openjdk-11-jdk maven
@@ -14,7 +18,8 @@ export GOPATH=$HOME/go
 export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
 mkdir -p $GOPATH
 export PXF_HOME=/usr/local/pxf
-mkdir -p $PXF_HOME
+sudo mkdir -p "$PXF_HOME"
+sudo chown -R gpadmin:gpadmin "$PXF_HOME"
 
 # Build all PXF components
 make all
@@ -26,10 +31,12 @@ make install
 
 export PXF_BASE=$HOME/pxf-base
 export PATH=$PXF_HOME/bin:$PATH
+rm -rf "$PXF_BASE"
+mkdir -p "$PXF_BASE"
 
 # Initialize PXF
 pxf prepare
 pxf start
 
 # Verify PXF is running
-pxf status
\ No newline at end of file
+pxf status
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
index 5e8e7b61..146092d2 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/entrypoint.sh
@@ -1,226 +1,240 @@
 #!/bin/bash
-set -e
-set -x
-
-sudo apt-get update && \
-    sudo apt-get install -y wget lsb-release locales openjdk-11-jre-headless 
openjdk-8-jre-headless iproute2 sudo  && \
-    sudo locale-gen en_US.UTF-8 && \
-    sudo locale-gen ru_RU.CP1251 && \
-    sudo locale-gen ru_RU.UTF-8 && \
-    sudo update-locale LANG=en_US.UTF-8
-
-export LANG=en_US.UTF-8
-export LANGUAGE=en_US:en
-export LC_ALL=en_US.UTF-8
-
-sudo apt-get install -y maven unzip openssh-server
-
-sudo localedef -c -i ru_RU -f CP1251 ru_RU.CP1251
-
-sudo ssh-keygen -A && \
-sudo bash -c 'echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config' && \
-sudo mkdir -p /etc/ssh/sshd_config.d && \
-sudo touch /etc/ssh/sshd_config.d/pxf-automation.conf && \
-sudo bash -c 'echo "KexAlgorithms 
+diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1,diffie-hellman-group1-sha1"
 >> /etc/ssh/sshd_config.d/pxf-automation.conf' && \
-sudo bash -c 'echo "HostKeyAlgorithms +ssh-rsa,ssh-dss" >> 
/etc/ssh/sshd_config.d/pxf-automation.conf' && \
-sudo bash -c 'echo "PubkeyAcceptedAlgorithms +ssh-rsa,ssh-dss" >> 
/etc/ssh/sshd_config.d/pxf-automation.conf'
-
-sudo usermod -a -G sudo gpadmin && \
-echo "gpadmin:cbdb@123" | sudo chpasswd && \
-echo "gpadmin        ALL=(ALL)       NOPASSWD: ALL" | sudo tee -a /etc/sudoers 
&& \
-echo "root           ALL=(ALL)       NOPASSWD: ALL" | sudo tee -a /etc/sudoers
-
-
-mkdir -p /home/gpadmin/.ssh && \
-sudo chown -R gpadmin:gpadmin /home/gpadmin/.ssh && \
-sudo -u gpadmin ssh-keygen -t rsa -b 4096 -m PEM -C gpadmin -f 
/home/gpadmin/.ssh/id_rsa -P "" && \
-sudo -u gpadmin bash -c 'cat /home/gpadmin/.ssh/id_rsa.pub >> 
/home/gpadmin/.ssh/authorized_keys' && \
-sudo -u gpadmin chmod 0600 /home/gpadmin/.ssh/authorized_keys
-
-# ----------------------------------------------------------------------
-# Start SSH daemon and setup for SSH access
-# ----------------------------------------------------------------------
-# The SSH daemon is started to allow remote access to the container via
-# SSH. This is useful for development and debugging purposes. If the SSH
-# daemon fails to start, the script exits with an error.
-# ----------------------------------------------------------------------
-if [ ! -d /var/run/sshd ]; then
-   sudo mkdir /var/run/sshd
-   sudo chmod 0755 /var/run/sshd
-fi
-if ! sudo /usr/sbin/sshd; then
-    echo "Failed to start SSH daemon"
-    exit 1
-fi
-
-# ----------------------------------------------------------------------
-# Remove /run/nologin to allow logins for all users via SSH
-# ----------------------------------------------------------------------
-sudo rm -rf /run/nologin
-
-# ----------------------------------------------------------------------
-# Configure /home/gpadmin
-# ----------------------------------------------------------------------
-mkdir -p /home/gpadmin/.ssh/
-ssh-keyscan -t rsa cdw > /home/gpadmin/.ssh/known_hosts
-chown -R gpadmin:gpadmin /home/gpadmin/.ssh/
-
-# ----------------------------------------------------------------------
-# Build Cloudberry
-# ----------------------------------------------------------------------
-sudo chown -R gpadmin:gpadmin /home/gpadmin/workspace/
-./script/build_cloudberrry.sh
-
-
-# ----------------------------------------------------------------------
-# Build pxf
-# ----------------------------------------------------------------------
-./script/build_pxf.sh
-
-
-# ----------------------------------------------------------------------
-# Source pxf env
-# ----------------------------------------------------------------------
-source ./script/pxf-env.sh
-
-# ----------------------------------------------------------------------
-# Prepare PXF
-# ----------------------------------------------------------------------
-export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64
-export PATH="$PXF_HOME/bin:$PATH"
-export PXF_JVM_OPTS="-Xmx512m -Xms256m"
-export PXF_HOST=localhost # 0.0.0.0  # listen on all interfaces
-
-# Prepare a new $PXF_BASE directory on each Greenplum Database host.
-# - create directory structure in $PXF_BASE
-# - copy configuration files from $PXF_HOME/conf to $PXF_BASE/conf
-#/usr/local/pxf/bin/pxf cluster prepare
-
-# Use Java 11:
-echo "JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64" >> 
$PXF_BASE/conf/pxf-env.sh
-# Configure PXF to listen on all interfaces
-sed -i 's/# server.address=localhost/server.address=0.0.0.0/' 
$PXF_BASE/conf/pxf-application.properties
-# add property to allow dynamic test: profiles that are used when testing 
against FDW
-echo -e "\npxf.profile.dynamic.regex=test:.*" >> 
$PXF_BASE/conf/pxf-application.properties
-# set up pxf configs from templates
-cp -v $PXF_HOME/templates/{hdfs,mapred,yarn,core,hbase,hive}-site.xml 
$PXF_BASE/servers/default
-
-# Register PXF extension in Greenplum
-# - Copy the PXF extension control file from the PXF installation on each host 
to the Greenplum installation on the host
-#/usr/local/pxf/bin/pxf cluster register
-# # Start PXF
-#/usr/local/pxf/bin/pxf cluster start
-
-# ----------------------------------------------------------------------
-# Prepare Hadoop
-# ----------------------------------------------------------------------
-# FIXME: reuse old scripts
-cd /home/gpadmin/workspace/cloudberry-pxf/automation
-make symlink_pxf_jars
-cp /home/gpadmin/automation_tmp_lib/pxf-hbase.jar $GPHD_ROOT/hbase/lib/
-
-$GPHD_ROOT/bin/init-gphd.sh
-$GPHD_ROOT/bin/start-gphd.sh
-
-# --------------------------------------------------------------------
-# Run tests independently and collect results
-# --------------------------------------------------------------------
-# create GOCACHE directory for gpadmin user
-sudo mkdir -p /home/gpadmin/.cache/go-build
-sudo chown -R gpadmin:gpadmin /home/gpadmin/.cache
-sudo chmod -R 755 /home/gpadmin/.cache
-# create .m2 cache directory
-sudo mkdir -p /home/gpadmin/.m2
-sudo chown -R gpadmin:gpadmin /home/gpadmin/.m2
-sudo chmod -R 755 /home/gpadmin/.m2
-
-# Output results directly to mounted automation directory
-TEST_RESULTS_DIR="/home/gpadmin/workspace/cloudberry-pxf/automation/test_artifacts"
-mkdir -p "$TEST_RESULTS_DIR"
-echo "Test Component,Status,Duration,Details" > "$TEST_RESULTS_DIR/summary.csv"
-
-# Function to run test and record result
-run_test() {
-    local component="$1"
-    local test_dir="$2"
-    local test_cmd="$3"
-    local start_time=$(date +%s)
-    local log_file="$TEST_RESULTS_DIR/${component}.log"
-    
-    echo "Running $component tests..."
-    cd "$test_dir"
-    
-    # Run the test and capture both exit code and output
-    if eval "$test_cmd" > "$log_file" 2>&1; then
-        local exit_code=0
-    else
-        local exit_code=$?
-    fi
-    
-    # Check for specific failure patterns in the log
-    local status="PASS"
-    local details="All tests passed"
-    
-    if [ $exit_code -ne 0 ]; then
-        status="FAIL"
-        details="Exit code: $exit_code. Check ${component}.log for details"
-    elif grep -q "There are test failures\|BUILD FAILURE\|FAILED\|Failures: 
[1-9]" "$log_file"; then
-        status="FAIL"
-        details="Test failures detected. Check ${component}.log for details"
-    elif grep -q "Tests run:.*Failures: [1-9]" "$log_file"; then
-        status="FAIL"
-        details="Test failures detected. Check ${component}.log for details"
-    fi
-    
-    local end_time=$(date +%s)
-    local duration=$((end_time - start_time))
-    
-    echo "$component,$status,${duration}s,$details" >> 
"$TEST_RESULTS_DIR/summary.csv"
-    echo "$component: $status (${duration}s)"
+set -euo pipefail
+
+log() { echo "[entrypoint][$(date '+%F %T')] $*"; }
+die() { log "ERROR $*"; exit 1; }
+
+ROOT_DIR=/home/gpadmin/workspace
+REPO_DIR=${ROOT_DIR}/cloudberry-pxf
+GPHD_ROOT=${ROOT_DIR}/singlecluster
+PXF_SCRIPTS=${REPO_DIR}/concourse/docker/pxf-cbdb-dev/ubuntu/script
+source "${PXF_SCRIPTS}/utils.sh"
+
+HADOOP_ROOT=${GPHD_ROOT}/hadoop
+HIVE_ROOT=${GPHD_ROOT}/hive
+HBASE_ROOT=${GPHD_ROOT}/hbase
+ZOOKEEPER_ROOT=${GPHD_ROOT}/zookeeper
+
+JAVA_11_ARM=/usr/lib/jvm/java-11-openjdk-arm64
+JAVA_11_AMD=/usr/lib/jvm/java-11-openjdk-amd64
+JAVA_8_ARM=/usr/lib/jvm/java-8-openjdk-arm64
+JAVA_8_AMD=/usr/lib/jvm/java-8-openjdk-amd64
+
+detect_java_paths() {
+  case "$(uname -m)" in
+    aarch64|arm64) JAVA_BUILD=${JAVA_11_ARM}; JAVA_HADOOP=${JAVA_8_ARM} ;;
+    x86_64|amd64)  JAVA_BUILD=${JAVA_11_AMD}; JAVA_HADOOP=${JAVA_8_AMD} ;;
+    *)             JAVA_BUILD=${JAVA_11_ARM}; JAVA_HADOOP=${JAVA_8_ARM} ;;
+  esac
+  export JAVA_BUILD JAVA_HADOOP
 }
 
-# Run CLI tests
-run_test "CLI" "/home/gpadmin/workspace/cloudberry-pxf/cli" "make test"
-
-# Run External Table tests
-run_test "External-Table" 
"/home/gpadmin/workspace/cloudberry-pxf/external-table" "make installcheck"
-
-# Run Server tests
-run_test "Server" "/home/gpadmin/workspace/cloudberry-pxf/server" "./gradlew 
test"
-
-# Run Automation setup
-run_test "Automation-Setup" 
"/home/gpadmin/workspace/cloudberry-pxf/automation" "make"
+setup_locale_and_packages() {
+  log "install base packages and locales"
+  sudo apt-get update
+  sudo apt-get install -y wget lsb-release locales maven unzip openssh-server 
iproute2 sudo \
+    openjdk-11-jre-headless openjdk-8-jre-headless
+  sudo locale-gen en_US.UTF-8 ru_RU.CP1251 ru_RU.UTF-8
+  sudo update-locale LANG=en_US.UTF-8
+  sudo localedef -c -i ru_RU -f CP1251 ru_RU.CP1251 || true
+  export LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=en_US.UTF-8
+}
 
-# Run Smoke tests
-run_test "Smoke-Test" "/home/gpadmin/workspace/cloudberry-pxf/automation" 
"make TEST=HdfsSmokeTest"
+setup_ssh() {
+  log "configure ssh"
+  sudo ssh-keygen -A
+  sudo bash -c 'echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config'
+  sudo mkdir -p /etc/ssh/sshd_config.d
+  sudo bash -c 'cat >/etc/ssh/sshd_config.d/pxf-automation.conf <<EOF
+KexAlgorithms 
+diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1,diffie-hellman-group1-sha1
+HostKeyAlgorithms +ssh-rsa,ssh-dss
+PubkeyAcceptedAlgorithms +ssh-rsa,ssh-dss
+EOF'
+  sudo usermod -a -G sudo gpadmin
+  echo "gpadmin:cbdb@123" | sudo chpasswd
+  echo "gpadmin        ALL=(ALL)       NOPASSWD: ALL" | sudo tee -a 
/etc/sudoers >/dev/null
+  echo "root           ALL=(ALL)       NOPASSWD: ALL" | sudo tee -a 
/etc/sudoers >/dev/null
+
+  mkdir -p /home/gpadmin/.ssh
+  sudo chown -R gpadmin:gpadmin /home/gpadmin/.ssh
+  if [ ! -f /home/gpadmin/.ssh/id_rsa ]; then
+    sudo -u gpadmin ssh-keygen -q -t rsa -b 4096 -m PEM -C gpadmin -f 
/home/gpadmin/.ssh/id_rsa -N ""
+  fi
+  sudo -u gpadmin bash -lc 'cat /home/gpadmin/.ssh/id_rsa.pub >> 
/home/gpadmin/.ssh/authorized_keys'
+  sudo -u gpadmin chmod 0600 /home/gpadmin/.ssh/authorized_keys
+  ssh-keyscan -t rsa mdw cdw localhost 2>/dev/null > 
/home/gpadmin/.ssh/known_hosts || true
+  sudo rm -rf /run/nologin
+  sudo mkdir -p /var/run/sshd && sudo chmod 0755 /var/run/sshd
+  sudo /usr/sbin/sshd || die "Failed to start sshd"
+}
 
-# Run GPDB group tests (allow failure)
-run_test "GPDB-Group" "/home/gpadmin/workspace/cloudberry-pxf/automation" 
"make GROUP=gpdb"
+relax_pg_hba() {
+  local 
pg_hba=/home/gpadmin/workspace/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1/pg_hba.conf
+  if [ -f "${pg_hba}" ] && ! grep -q "127.0.0.1/32 trust" "${pg_hba}"; then
+    cat >> "${pg_hba}" <<'EOF'
+host all all 127.0.0.1/32 trust
+host all all ::1/128 trust
+EOF
+    source /usr/local/cloudberry-db/cloudberry-env.sh >/dev/null 2>&1 || true
+    GPPORT=${GPPORT:-7000}
+    
COORDINATOR_DATA_DIRECTORY=/home/gpadmin/workspace/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1
+    gpstop -u || true
+  fi
+}
 
-# Copy additional test artifacts to mounted directory
-echo "Collecting additional test artifacts..."
+build_cloudberry() {
+  log "build Cloudberry"
+  log "cleanup stale gpdemo data and PG locks"
+  rm -rf /home/gpadmin/workspace/cloudberry/gpAux/gpdemo/datadirs
+  rm -f /tmp/.s.PGSQL.700*
+  sudo chown -R gpadmin:gpadmin "${ROOT_DIR}" || true
+  "${PXF_SCRIPTS}/build_cloudberrry.sh"
+}
 
-# Copy PXF logs
-mkdir -p "$TEST_RESULTS_DIR/pxf_logs"
-cp -r ~/pxf-base/logs/* "$TEST_RESULTS_DIR/pxf_logs/" 2>/dev/null || true
+build_pxf() {
+  log "build PXF"
+  "${PXF_SCRIPTS}/build_pxf.sh"
+}
 
-# Copy server test reports
-mkdir -p "$TEST_RESULTS_DIR/server_reports"
-cp -r ~/workspace/cloudberry-pxf/server/build/reports/tests/test/* 
"$TEST_RESULTS_DIR/server_reports/" 2>/dev/null || true
+configure_pxf() {
+  log "configure PXF"
+  source "${PXF_SCRIPTS}/pxf-env.sh"
+  export PATH="$PXF_HOME/bin:$PATH"
+  export PXF_JVM_OPTS="-Xmx512m -Xms256m"
+  export PXF_HOST=localhost
+  echo "JAVA_HOME=${JAVA_BUILD}" >> "$PXF_BASE/conf/pxf-env.sh"
+  sed -i 's/# server.address=localhost/server.address=0.0.0.0/' 
"$PXF_BASE/conf/pxf-application.properties"
+  echo -e "\npxf.profile.dynamic.regex=test:.*" >> 
"$PXF_BASE/conf/pxf-application.properties"
+  cp -v "$PXF_HOME"/templates/{hdfs,mapred,yarn,core,hbase,hive}-site.xml 
"$PXF_BASE/servers/default"
+  # Some templates do not ship pxf-site.xml per server; create a minimal one 
when missing.
+  for server_dir in "$PXF_BASE/servers/default" 
"$PXF_BASE/servers/default-no-impersonation"; do
+    if [ ! -d "$server_dir" ]; then
+      cp -r "$PXF_BASE/servers/default" "$server_dir"
+    fi
+    if [ ! -f "$server_dir/pxf-site.xml" ]; then
+      cat > "$server_dir/pxf-site.xml" <<'XML'
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
+</configuration>
+XML
+    fi
+  done
+  if ! grep -q "pxf.service.user.name" 
"$PXF_BASE/servers/default-no-impersonation/pxf-site.xml"; then
+    sed -i 's#</configuration>#  <property>\n    
<name>pxf.service.user.name</name>\n    <value>foobar</value>\n  </property>\n  
<property>\n    <name>pxf.service.user.impersonation</name>\n    
<value>false</value>\n  </property>\n</configuration>#' 
"$PXF_BASE/servers/default-no-impersonation/pxf-site.xml"
+  fi
+}
 
-# Copy automation surefire reports (if they exist)
-if [ -d ~/workspace/cloudberry-pxf/automation/target/surefire-reports ]; then
-    cp -r ~/workspace/cloudberry-pxf/automation/target/surefire-reports 
"$TEST_RESULTS_DIR/"
-fi
+prepare_hadoop_stack() {
+  log "prepare Hadoop/Hive/HBase stack"
+  export JAVA_HOME="${JAVA_HADOOP}"
+  export PATH="$JAVA_HOME/bin:$HADOOP_ROOT/bin:$HIVE_ROOT/bin:$PATH"
+  source "${GPHD_ROOT}/bin/gphd-env.sh"
+  cd "${REPO_DIR}/automation"
+  make symlink_pxf_jars
+  cp /home/gpadmin/automation_tmp_lib/pxf-hbase.jar "$GPHD_ROOT/hbase/lib/" || 
true
+  # Ensure HBase sees PXF comparator classes even if automation_tmp_lib was 
empty
+  if [ ! -f "${GPHD_ROOT}/hbase/lib/pxf-hbase.jar" ]; then
+    pxf_app=$(ls -1v /usr/local/pxf/application/pxf-app-*.jar | grep -v 
'plain' | tail -n 1)
+    unzip -qq -j "${pxf_app}" 'BOOT-INF/lib/pxf-hbase-*.jar' -d 
"${GPHD_ROOT}/hbase/lib/"
+  fi
+  # clean stale Hive locks and stop any leftover services to avoid start 
failures
+  rm -f "${GPHD_ROOT}/storage/hive/metastore_db/"*.lck 2>/dev/null || true
+  rm -f "${GPHD_ROOT}/storage/pids"/hive-*.pid 2>/dev/null || true
+  if pgrep -f HiveMetaStore >/dev/null 2>&1; then
+    "${GPHD_ROOT}/bin/hive-service.sh" metastore stop || true
+  fi
+  if pgrep -f HiveServer2 >/dev/null 2>&1; then
+    "${GPHD_ROOT}/bin/hive-service.sh" hiveserver2 stop || true
+  fi
+  if [ ! -d "${GPHD_ROOT}/storage/hadoop/dfs/name/current" ]; then
+    ${GPHD_ROOT}/bin/init-gphd.sh
+  fi
+  if ! ${GPHD_ROOT}/bin/start-gphd.sh; then
+    log "start-gphd.sh returned non-zero (services may already be running), 
continue"
+  fi
+  if ! ${GPHD_ROOT}/bin/start-zookeeper.sh; then
+    log "start-zookeeper.sh returned non-zero (may already be running)"
+  fi
+  # ensure HBase is up
+  if ! ${GPHD_ROOT}/bin/start-hbase.sh; then
+    log "start-hbase.sh returned non-zero (services may already be running), 
continue"
+  fi
+  start_hive_services
+}
 
-# Copy automation logs (if they exist)
-if [ -d ~/workspace/cloudberry-pxf/automation/automation_logs ]; then
-    cp -r ~/workspace/cloudberry-pxf/automation/automation_logs 
"$TEST_RESULTS_DIR/"
-fi
+start_hive_services() {
+  log "start Hive metastore and HiveServer2 (NOSASL)"
+  export JAVA_HOME="${JAVA_HADOOP}"
+  export PATH="${JAVA_HOME}/bin:${HIVE_ROOT}/bin:${HADOOP_ROOT}/bin:${PATH}"
+  export HIVE_HOME="${HIVE_ROOT}"
+  export HADOOP_HOME="${HADOOP_ROOT}"
+  # bump HS2 heap to reduce Tez OOMs during tests
+  export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+  export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -Xms512m 
${HADOOP_CLIENT_OPTS:-}"
+
+  # ensure clean state
+  pkill -f HiveServer2 || true
+  pkill -f HiveMetaStore || true
+  rm -rf "${GPHD_ROOT}/storage/hive/metastore_db" 2>/dev/null || true
+  rm -f "${GPHD_ROOT}/storage/logs/derby.log" 2>/dev/null || true
+  rm -f "${GPHD_ROOT}/storage/pids"/hive-*.pid 2>/dev/null || true
+
+  # always re-init Derby schema to avoid stale locks; if the DB already 
exists, wipe and retry once
+  if ! PATH="${HIVE_ROOT}/bin:${HADOOP_ROOT}/bin:${PATH}" \
+        JAVA_HOME="${JAVA_HADOOP}" \
+        schematool -dbType derby -initSchema -verbose; then
+    log "schematool failed on first attempt, cleaning metastore_db and 
retrying"
+    rm -rf "${GPHD_ROOT}/storage/hive/metastore_db" 2>/dev/null || true
+    rm -f "${GPHD_ROOT}/storage/logs/derby.log" 2>/dev/null || true
+    PATH="${HIVE_ROOT}/bin:${HADOOP_ROOT}/bin:${PATH}" \
+      JAVA_HOME="${JAVA_HADOOP}" \
+      schematool -dbType derby -initSchema -verbose || die "schematool 
initSchema failed"
+  fi
+
+  # start metastore
+  HIVE_OPTS="--hiveconf 
javax.jdo.option.ConnectionURL=jdbc:derby:;databaseName=${GPHD_ROOT}/storage/hive/metastore_db;create=true"
 \
+    "${GPHD_ROOT}/bin/hive-service.sh" metastore start
+
+  # wait for 9083
+  local ok=false
+  for _ in 1 2 3 4 5 6 7 8 9 10; do
+    if bash -c ">/dev/tcp/localhost/9083" >/dev/null 2>&1; then
+      ok=true
+      break
+    fi
+    sleep 2
+  done
+  if [ "${ok}" != "true" ]; then
+    die "Hive metastore not reachable on 9083"
+  fi
+
+  # start HS2 with NOSASL
+  HIVE_OPTS="--hiveconf hive.server2.authentication=NOSASL --hiveconf 
hive.metastore.uris=thrift://localhost:9083 --hiveconf 
javax.jdo.option.ConnectionURL=jdbc:derby:;databaseName=${GPHD_ROOT}/storage/hive/metastore_db;create=true"
 \
+    "${GPHD_ROOT}/bin/hive-service.sh" hiveserver2 start
+}
 
-echo "Test execution completed. Results available in $TEST_RESULTS_DIR"
-ls -la "$TEST_RESULTS_DIR"
+run_tests() {
+  if [ "${RUN_TESTS:-true}" != "true" ]; then
+    log "RUN_TESTS=false, skipping automation run"
+    return
+  fi
+  log "running tests group=${GROUP:-}"
+  "${PXF_SCRIPTS}/run_tests.sh" "${GROUP:-}"
+}
 
+main() {
+  detect_java_paths
+  setup_locale_and_packages
+  setup_ssh
+  build_cloudberry
+  relax_pg_hba
+  build_pxf
+  configure_pxf
+  prepare_hadoop_stack
+  health_check
+  #run_tests
+  log "entrypoint finished; keeping container alive"
+  tail -f /dev/null
+}
 
-# Keep container running
-#tail -f /dev/null
\ No newline at end of file
+main "$@"
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/pxf-env.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/pxf-env.sh
index c366ab96..357a63df 100755
--- a/concourse/docker/pxf-cbdb-dev/ubuntu/script/pxf-env.sh
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/pxf-env.sh
@@ -1,24 +1,50 @@
 #!/bin/bash
-# PXF Environment Variables
-export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64
-export PATH=$JAVA_HOME/bin:$PATH
-export GPHOME=/usr/local/cloudberry-db
-export PATH=$GPHOME/bin:$PATH
-export GOPATH=$HOME/go
-export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
-export PXF_HOME=/usr/local/pxf
-export PXF_BASE=$HOME/pxf-base
-export PATH=$PXF_HOME/bin:$PATH
+# Centralized environment for Cloudberry + PXF + Hadoop stack
 
-# Source Cloudberry environment
-if [ -f "$GPHOME/cloudberry-env.sh" ]; then
-    source $GPHOME/cloudberry-env.sh
-fi
+# --------------------------------------------------------------------
+# Architecture-aware Java selections
+# --------------------------------------------------------------------
+case "$(uname -m)" in
+  aarch64|arm64)
+    JAVA_BUILD=${JAVA_BUILD:-/usr/lib/jvm/java-11-openjdk-arm64}
+    JAVA_HADOOP=${JAVA_HADOOP:-/usr/lib/jvm/java-8-openjdk-arm64}
+    ;;
+  x86_64|amd64)
+    JAVA_BUILD=${JAVA_BUILD:-/usr/lib/jvm/java-11-openjdk-amd64}
+    JAVA_HADOOP=${JAVA_HADOOP:-/usr/lib/jvm/java-8-openjdk-amd64}
+    ;;
+  *)
+    JAVA_BUILD=${JAVA_BUILD:-/usr/lib/jvm/java-11-openjdk}
+    JAVA_HADOOP=${JAVA_HADOOP:-/usr/lib/jvm/java-8-openjdk}
+    ;;
+esac
 
-# Source demo cluster environment if available
-if [ -f "/home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh" ]; then
-    source /home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh
-fi
+# --------------------------------------------------------------------
+# Core paths
+# --------------------------------------------------------------------
+export GPHOME=${GPHOME:-/usr/local/cloudberry-db}
+export PXF_HOME=${PXF_HOME:-/usr/local/pxf}
+export PXF_BASE=${PXF_BASE:-/home/gpadmin/pxf-base}
+export GPHD_ROOT=${GPHD_ROOT:-/home/gpadmin/workspace/singlecluster}
+export GOPATH=${GOPATH:-/home/gpadmin/go}
+export 
PATH="$GPHD_ROOT/bin:$GPHD_ROOT/hadoop/bin:$GPHD_ROOT/hive/bin:$GPHD_ROOT/hbase/bin:$GPHD_ROOT/zookeeper/bin:$JAVA_BUILD/bin:/usr/local/go/bin:$GOPATH/bin:$GPHOME/bin:$PXF_HOME/bin:$PATH"
+export COMMON_JAVA_OPTS=${COMMON_JAVA_OPTS:-}
 
-echo "PXF environment loaded successfully"
+# --------------------------------------------------------------------
+# Database defaults
+# --------------------------------------------------------------------
+export PGHOST=${PGHOST:-localhost}
+export PGPORT=${PGPORT:-7000}
+export 
MASTER_DATA_DIRECTORY=${MASTER_DATA_DIRECTORY:-/home/gpadmin/workspace/cloudberry/gpAux/gpdemo/datadirs/qddir/demoDataDir-1}
 
+# --------------------------------------------------------------------
+# PXF defaults
+# --------------------------------------------------------------------
+export PXF_JVM_OPTS=${PXF_JVM_OPTS:-"-Xmx512m -Xms256m"}
+export PXF_HOST=${PXF_HOST:-localhost}
+
+# Source Cloudberry env and demo cluster if present
+[ -f "$GPHOME/cloudberry-env.sh" ] && source "$GPHOME/cloudberry-env.sh"
+[ -f "/home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh" ] && 
source /home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh
+
+echo "[pxf-env] loaded (JAVA_BUILD=${JAVA_BUILD}, JAVA_HADOOP=${JAVA_HADOOP})"
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
new file mode 100755
index 00000000..4f8249e9
--- /dev/null
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/run_tests.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+set -euo pipefail
+
+# Run automation tests only (assumes build/env already prepared)
+
+# Use a unique var name to avoid clobbering by sourced env scripts
+RUN_TESTS_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# Repo root is five levels up from script dir
+REPO_ROOT="$(cd "${RUN_TESTS_DIR}/../../../../.." && pwd)"
+cd "${REPO_ROOT}/automation"
+
+# Load centralized env (sets JAVA_BUILD/HADOOP, GPHD_ROOT, PGPORT, etc.)
+source "${RUN_TESTS_DIR}/pxf-env.sh"
+source "${RUN_TESTS_DIR}/utils.sh"
+
+# Test-related defaults (kept close to test runner)
+export GROUP=${GROUP:-smoke}
+export RUN_TESTS=${RUN_TESTS:-true}
+export PXF_SKIP_TINC=${PXF_SKIP_TINC:-false}
+export EXCLUDED_GROUPS=${EXCLUDED_GROUPS:-}
+# Keep test data on HDFS between classes to avoid missing inputs
+export PXF_TEST_KEEP_DATA=${PXF_TEST_KEEP_DATA:-true}
+
+# Hadoop/Hive/HBase env
+export JAVA_HOME="${JAVA_HADOOP}"
+export PATH="$JAVA_HOME/bin:$PATH"
+source "${GPHD_ROOT}/bin/gphd-env.sh"
+
+# Force local PostgreSQL to IPv4 to avoid ::1 pg_hba misses in proxy tests
+export PGHOST=127.0.0.1
+# Match historical float string output used by expected files
+export PGOPTIONS=${PGOPTIONS:-"-c extra_float_digits=0"}
+
+# Ensure Cloudberry env if present
+[ -f "/usr/local/cloudberry-db/cloudberry-env.sh" ] && source 
/usr/local/cloudberry-db/cloudberry-env.sh
+[ -f "/home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh" ] && 
source /home/gpadmin/workspace/cloudberry/gpAux/gpdemo/gpdemo-env.sh
+
+# Add Hadoop/HBase/Hive bins
+export HADOOP_HOME=${HADOOP_HOME:-${GPHD_ROOT}/hadoop}
+export HBASE_HOME=${HBASE_HOME:-${GPHD_ROOT}/hbase}
+export HIVE_HOME=${HIVE_HOME:-${GPHD_ROOT}/hive}
+export PATH="${HADOOP_HOME}/bin:${HBASE_HOME}/bin:${HIVE_HOME}/bin:${PATH}"
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-${HADOOP_HOME}/etc/hadoop}
+export YARN_CONF_DIR=${YARN_CONF_DIR:-${HADOOP_HOME}/etc/hadoop}
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-${HBASE_HOME}/conf}
+export HDFS_URI=${HDFS_URI:-hdfs://localhost:8020}
+export HADOOP_OPTS="-Dfs.defaultFS=${HDFS_URI} ${HADOOP_OPTS:-}"
+export HADOOP_CLIENT_OPTS="${HADOOP_OPTS}"
+export MAVEN_OPTS="-Dfs.defaultFS=${HDFS_URI} ${MAVEN_OPTS:-}"
+
+# Force Hive endpoints to localhost unless explicitly overridden (default sut 
points to cdw)
+export HIVE_HOST=${HIVE_HOST:-localhost}
+export HIVE_PORT=${HIVE_PORT:-10000}
+export HIVE_SERVER_HOST=${HIVE_SERVER_HOST:-${HIVE_HOST}}
+export HIVE_SERVER_PORT=${HIVE_SERVER_PORT:-${HIVE_PORT}}
+
+# Run health check
+health_check
+
+# Simple wrappers per group
+smoke_test() {
+  make GROUP="smoke"
+  echo "[run_tests] GROUP=smoke finished"
+}
+
+hcatalog_test() {
+  make GROUP="hcatalog"
+  echo "[run_tests] GROUP=hcatalog finished"
+}
+
+hcfs_test() {
+  make GROUP="hcfs"
+  echo "[run_tests] GROUP=hcfs finished"
+}
+
+hdfs_test() {
+  make GROUP="hdfs"
+  echo "[run_tests] GROUP=hdfs finished"
+}
+
+hive_test() {
+  make GROUP="hive"
+  echo "[run_tests] GROUP=hive finished"
+}
+
+main() {
+  smoke_test
+  hcatalog_test
+  hcfs_test
+  hdfs_test
+  hive_test
+}
+
+main "$@"
diff --git a/concourse/docker/pxf-cbdb-dev/ubuntu/script/utils.sh 
b/concourse/docker/pxf-cbdb-dev/ubuntu/script/utils.sh
new file mode 100644
index 00000000..dbf8c844
--- /dev/null
+++ b/concourse/docker/pxf-cbdb-dev/ubuntu/script/utils.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# Shared health-check helpers for entrypoint and run_tests
+set -euo pipefail
+
+# Fallback log/die in case caller didn't define them
+log() { echo "[utils][$(date '+%F %T')] $*"; }
+die() { log "ERROR $*"; exit 1; }
+
+wait_port() {
+  local host="$1" port="$2" retries="${3:-10}" sleep_sec="${4:-2}"
+  local i
+  for i in $(seq 1 "${retries}"); do
+    if (echo >/dev/tcp/"${host}"/"${port}") >/dev/null 2>&1; then
+      return 0
+    fi
+    sleep "${sleep_sec}"
+  done
+  return 1
+}
+
+check_jvm_procs() {
+  if command -v jps >/dev/null 2>&1; then
+    jps_out=$(jps)
+  else
+    jps_out=$(ps -eo cmd | grep java)
+  fi
+  echo "$jps_out"
+  echo "$jps_out" | grep -q NameNode || die "NameNode not running"
+  echo "$jps_out" | grep -q DataNode || die "DataNode not running"
+}
+
+check_hbase() {
+  local hbase_host="${HBASE_HOST:-$(hostname -I | awk '{print $1}')}"
+  hbase_host=${hbase_host:-127.0.0.1}
+
+  if ! echo "$jps_out" | grep -q HMaster && ! pgrep -f HMaster >/dev/null 
2>&1; then
+    die "HBase HMaster not running"
+  fi
+
+  if ! echo "$jps_out" | grep -q HRegionServer && ! pgrep -f HRegionServer 
>/dev/null 2>&1; then
+    die "HBase RegionServer not running"
+  fi
+
+  local hbase_ok=true
+  if ! printf "status 'simple'\n" | "${HBASE_ROOT}/bin/hbase" shell -n 
>/tmp/hbase_status.log 2>&1; then
+    hbase_ok=false
+  fi
+  if ! (echo >/dev/tcp/"${hbase_host}"/16000) >/dev/null 2>&1; then
+    hbase_ok=false
+  fi
+  if [ "${hbase_ok}" != "true" ]; then
+    [ -f /tmp/hbase_status.log ] && cat /tmp/hbase_status.log
+    die "HBase health check failed (status or port 16000 on ${hbase_host})"
+  fi
+}
+
+check_hdfs() {
+  hdfs dfs -test -d / || die "HDFS root not accessible"
+}
+
+check_hive() {
+  wait_port localhost 9083 10 2 || die "Hive metastore not reachable on 9083"
+  wait_port "${HIVE_HOST:-localhost}" "${HIVE_PORT:-10000}" 10 2 || die 
"HiveServer2 port not reachable"
+
+  local beeline_ok=true
+  if command -v beeline >/dev/null 2>&1; then
+    beeline_ok=false
+    for _ in 1 2 3 4 5; do
+      if beeline -u 
"jdbc:hive2://${HIVE_HOST:-localhost}:${HIVE_PORT:-10000}/default;auth=noSasl" \
+          -n "${HIVE_USER:-gpadmin}" -p "${HIVE_PASSWORD:-gpadmin}" \
+          -e "select 1" >/tmp/hive_health.log 2>&1; then
+        beeline_ok=true
+        break
+      fi
+      sleep 2
+    done
+  fi
+
+  if [ "${beeline_ok}" != "true" ]; then
+    [ -f /tmp/hive_health.log ] && cat /tmp/hive_health.log
+    die "HiveServer2 query failed"
+  fi
+}
+
+check_pxf() {
+  if ! curl -sf http://localhost:5888/actuator/health >/dev/null 2>&1; then
+    die "PXF actuator health endpoint not responding"
+  fi
+}
+
+health_check() {
+  log "sanity check Hadoop/Hive/HBase/PXF"
+  GPHD_ROOT=${GPHD_ROOT:-/home/gpadmin/workspace/singlecluster}
+  HADOOP_ROOT=${HADOOP_ROOT:-${GPHD_ROOT}/hadoop}
+  HBASE_ROOT=${HBASE_ROOT:-${GPHD_ROOT}/hbase}
+  HIVE_ROOT=${HIVE_ROOT:-${GPHD_ROOT}/hive}
+  JAVA_HADOOP=${JAVA_HADOOP:-/usr/lib/jvm/java-8-openjdk-amd64}
+
+  export JAVA_HOME="${JAVA_HADOOP}"
+  export 
PATH="$JAVA_HOME/bin:$HADOOP_ROOT/bin:$HIVE_ROOT/bin:$HBASE_ROOT/bin:$PATH"
+  [ -f "${GPHD_ROOT}/bin/gphd-env.sh" ] && source 
"${GPHD_ROOT}/bin/gphd-env.sh"
+
+  check_jvm_procs
+  check_hbase
+  check_hdfs
+  check_hive
+  check_pxf
+  log "all components healthy: HDFS/HBase/Hive/PXF"
+}
diff --git a/concourse/singlecluster/Dockerfile 
b/concourse/singlecluster/Dockerfile
index d105038b..5232f990 100644
--- a/concourse/singlecluster/Dockerfile
+++ b/concourse/singlecluster/Dockerfile
@@ -4,7 +4,9 @@ ENV DEBIAN_FRONTEND noninteractive
 
 RUN sudo apt-get update &&  \
     sudo apt-get install -y --no-install-recommends \
-      curl ca-certificates
+      curl ca-certificates \
+      openjdk-8-jdk-headless \
+      openjdk-11-jdk-headless
 
 # TODO: update hive to support java 11+
 ENV    HADOOP_VERSION=3.1.2
@@ -42,7 +44,9 @@ RUN mkdir -p $HADOOP_ROOT && \
     curl -fSL "$HADOOP_URL" -o hadoop.tar.gz && \
     echo "$HADOOP_SHA512 hadoop.tar.gz" | sha512sum -c && \
     tar xvf hadoop.tar.gz -C $HADOOP_ROOT --strip-components 1  
--exclude="share/doc/*" --exclude="*-sources.jar" && \
-    rm hadoop.tar.gz
+    rm hadoop.tar.gz && \
+    curl -fSL 
"https://repo1.maven.org/maven2/javax/activation/javax.activation-api/1.2.0/javax.activation-api-1.2.0.jar";
 \
+      -o $HADOOP_ROOT/share/hadoop/common/lib/javax.activation-api-1.2.0.jar
 
 RUN mkdir -p $HIVE_ROOT && \
     curl -fSL $HIVE_URL -o hive.tar.gz && \
@@ -68,10 +72,18 @@ RUN mkdir -p $TEZ_ROOT && \
     tar xvf tez.tar.gz -C $TEZ_ROOT --strip-components 1 && \
     rm tez.tar.gz
 
-# Install Go (required by PXF).
-RUN mkdir -p /tmp/pxf_src/ && cd /tmp \
-    && wget -O go.tgz -q https://go.dev/dl/go1.23.3.linux-amd64.tar.gz \
-    && sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go.tgz && rm 
go.tgz
+# Install Go (required by PXF). Pick archive based on architecture 
(amd64/arm64).
+ARG TARGETARCH
+RUN set -e; \
+    arch="${TARGETARCH:-$(uname -m)}"; \
+    case "$arch" in \
+      amd64|x86_64) go_arch="amd64" ;; \
+      arm64|aarch64) go_arch="arm64" ;; \
+      *) echo "Unsupported architecture: ${arch}"; exit 1 ;; \
+    esac; \
+    mkdir -p /tmp/pxf_src/ && cd /tmp && \
+    wget -O go.tgz -q "https://go.dev/dl/go1.23.3.linux-${go_arch}.tar.gz"; && \
+    sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go.tgz && rm 
go.tgz
 
 
 COPY ./templates $GPHD_ROOT
diff --git a/concourse/singlecluster/bin/gphd-env.sh 
b/concourse/singlecluster/bin/gphd-env.sh
index 79f2714e..7f3f9381 100755
--- a/concourse/singlecluster/bin/gphd-env.sh
+++ b/concourse/singlecluster/bin/gphd-env.sh
@@ -47,9 +47,20 @@ export HBASE_CONF=${HBASE_ROOT}/conf
 export HIVE_CONF=${HIVE_ROOT}/conf
 export TEZ_CONF=${TEZ_ROOT}/conf
 export RANGER_CONF=${RANGER_ROOT}/conf
+export HADOOP_COMMON_LIB=${HADOOP_ROOT}/share/hadoop/common/lib
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH:-}
 
 export TEZ_JARS=$(echo "$TEZ_ROOT"/*.jar | tr ' ' ':'):$(echo 
"$TEZ_ROOT"/lib/*.jar | tr ' ' ':')
 
+ensure_activation_jar() {
+  local jar="$HADOOP_COMMON_LIB/javax.activation-api-1.2.0.jar"
+  if [ ! -f "$jar" ]; then
+    echo "Fetching javax.activation-api for Java11 runtime..."
+    curl -fSL 
"https://repo1.maven.org/maven2/javax/activation/javax.activation-api/1.2.0/javax.activation-api-1.2.0.jar";
 -o "$jar" || return 1
+  fi
+  export HADOOP_CLASSPATH="$HADOOP_CLASSPATH:$jar"
+}
+
 function cluster_initialized()
 {
        if [ -d ${HADOOP_STORAGE_ROOT}/dfs/name ]; then
@@ -82,4 +93,3 @@ function zookeeper_running()
        done
        return ${retval}
 }
-
diff --git a/concourse/singlecluster/bin/start-gphd.sh 
b/concourse/singlecluster/bin/start-gphd.sh
index e16cfd1b..3fe07e85 100755
--- a/concourse/singlecluster/bin/start-gphd.sh
+++ b/concourse/singlecluster/bin/start-gphd.sh
@@ -5,6 +5,8 @@ root=`cd \`dirname $0\`/..;pwd`
 bin=${root}/bin
 . ${bin}/gphd-env.sh
 
+ensure_activation_jar || exit 1
+
 ${bin}/start-hdfs.sh || exit 1
 
 ${HADOOP_BIN}/hdfs dfsadmin -safemode wait
diff --git a/concourse/singlecluster/bin/start-zookeeper.sh 
b/concourse/singlecluster/bin/start-zookeeper.sh
index 0f88aac1..673b90b9 100755
--- a/concourse/singlecluster/bin/start-zookeeper.sh
+++ b/concourse/singlecluster/bin/start-zookeeper.sh
@@ -8,6 +8,8 @@ bin=${root}/bin
 zookeeper_cfg=$ZOOKEEPER_CONF/zoo.cfg
 zookeeper_cfg_tmp=$zookeeper_cfg.preped
 
+sudo chown -R gpadmin:gpadmin "${ZOOKEEPER_CONF}"
+
 sed "s|dataDir.*$|dataDir=$ZOOKEEPER_STORAGE_ROOT|" ${zookeeper_cfg} > 
${zookeeper_cfg_tmp}
 rm -f ${zookeeper_cfg}
 mv ${zookeeper_cfg_tmp} ${zookeeper_cfg}
diff --git a/concourse/singlecluster/conf/gphd-conf.sh 
b/concourse/singlecluster/conf/gphd-conf.sh
index f79180eb..4d9c50c8 100755
--- a/concourse/singlecluster/conf/gphd-conf.sh
+++ b/concourse/singlecluster/conf/gphd-conf.sh
@@ -1,5 +1,12 @@
 # paths
-export JAVA_HOME=${JAVA_HOME:=/Library/Java/Home}
+# Prefer JAVA_HADOOP (from pxf-env); otherwise fall back to a default JDK8 
path.
+if [ -z "${JAVA_HOME:-}" ]; then
+  if [ -n "${JAVA_HADOOP:-}" ]; then
+    export JAVA_HOME="${JAVA_HADOOP}"
+  else
+    export JAVA_HOME=/usr/lib/jvm/java-8-openjdk
+  fi
+fi
 export STORAGE_ROOT=$GPHD_ROOT/storage
 export HADOOP_STORAGE_ROOT=$STORAGE_ROOT/hadoop
 export ZOOKEEPER_STORAGE_ROOT=$STORAGE_ROOT/zookeeper
@@ -9,7 +16,7 @@ export PXF_STORAGE_ROOT=$STORAGE_ROOT/pxf
 export RANGER_STORAGE_ROOT=$STORAGE_ROOT/ranger
 
 # settings
-export SLAVES=${SLAVES:-3}
+export SLAVES=${SLAVES:-1}
 
 # Automatically start HBase during GPHD startup
 export START_HBASE=true
diff --git a/concourse/singlecluster/templates/hadoop/etc/hadoop/hadoop-env.sh 
b/concourse/singlecluster/templates/hadoop/etc/hadoop/hadoop-env.sh
index 7ce50a38..abc85743 100755
--- a/concourse/singlecluster/templates/hadoop/etc/hadoop/hadoop-env.sh
+++ b/concourse/singlecluster/templates/hadoop/etc/hadoop/hadoop-env.sh
@@ -20,5 +20,8 @@ export HADOOP_LOG_DIR=$LOGS_ROOT
 # The directory where pid files are stored. /tmp by default.
 export HADOOP_PID_DIR=$PIDS_ROOT
 
-# FIXME: remove after upgrading to new Hive version
-export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
\ No newline at end of file
+# Rely on JAVA_HOME provided by gphd-env.sh (which already auto-detects 
arch/JDK).
+if [ -z "${JAVA_HOME:-}" ]; then
+  echo "Error: JAVA_HOME is not set (expected from gphd-env.sh)."
+  exit 1
+fi
diff --git a/concourse/singlecluster/templates/hadoop/etc/hadoop/yarn-env.sh 
b/concourse/singlecluster/templates/hadoop/etc/hadoop/yarn-env.sh
index 623cc561..2a023adc 100755
--- a/concourse/singlecluster/templates/hadoop/etc/hadoop/yarn-env.sh
+++ b/concourse/singlecluster/templates/hadoop/etc/hadoop/yarn-env.sh
@@ -29,16 +29,9 @@ export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
 # some Java parameters
 # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
 
-# FIXME: remove after upgrading to new Hive version
-export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-  
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
+# Rely on JAVA_HOME provided by gphd-env.sh (which already auto-detects 
arch/JDK).
+if [ -z "${JAVA_HOME:-}" ]; then
+  echo "Error: JAVA_HOME is not set (expected from gphd-env.sh)."
   exit 1
 fi
 
@@ -86,4 +79,3 @@ if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
 fi  
 YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
 
-
diff --git a/concourse/singlecluster/templates/hbase/conf/hbase-env.sh 
b/concourse/singlecluster/templates/hbase/conf/hbase-env.sh
index a3644e1a..36f3aadf 100755
--- a/concourse/singlecluster/templates/hbase/conf/hbase-env.sh
+++ b/concourse/singlecluster/templates/hbase/conf/hbase-env.sh
@@ -93,5 +93,8 @@ export HBASE_PID_DIR=$PIDS_ROOT
 # Tell HBase whether it should manage it's own instance of Zookeeper or not.
 export HBASE_MANAGES_ZK=false
 
-# FIXME: remove after upgrading to new Hive version
-export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
\ No newline at end of file
+# Prefer JAVA_HOME from gphd-env.sh; fail fast if missing to avoid divergent 
per-service detection.
+if [ -z "${JAVA_HOME:-}" ]; then
+  echo "Error: JAVA_HOME must be set (gphd-env.sh should export JAVA_HADOOP)."
+  exit 1
+fi
diff --git a/concourse/singlecluster/templates/hive/conf/hive-site.xml 
b/concourse/singlecluster/templates/hive/conf/hive-site.xml
index 35b2891e..db816bb5 100755
--- a/concourse/singlecluster/templates/hive/conf/hive-site.xml
+++ b/concourse/singlecluster/templates/hive/conf/hive-site.xml
@@ -7,16 +7,41 @@
                <name>hive.metastore.uris</name>
                <value>thrift://localhost:9083</value>
        </property>
+    <!-- Pin metastore to a deterministic Derby location inside singlecluster 
-->
+    <property>
+        <name>javax.jdo.option.ConnectionURL</name>
+        
<value>jdbc:derby:;databaseName=/home/gpadmin/workspace/singlecluster/storage/hive/metastore_db;create=true</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionDriverName</name>
+        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    </property>
        <property>
                <name>hive.server2.enable.impersonation</name>
                <value>true</value>
                <description>Set this property to enable impersonation in Hive 
Server 2</description>
        </property>
-       <property>
-               <name>hive.server2.enable.doAs</name>
-               <value>false</value>
-               <description>Set this property to enable impersonation in Hive 
Server 2</description>
-       </property>
+    <property>
+        <name>hive.server2.enable.doAs</name>
+        <value>false</value>
+        <description>Set this property to enable impersonation in Hive Server 
2</description>
+    </property>
+    <property>
+        <name>hive.server2.authentication</name>
+        <value>NOSASL</value>
+    </property>
+    <property>
+        <name>hive.server2.transport.mode</name>
+        <value>binary</value>
+    </property>
+    <property>
+        <name>hive.server2.thrift.port</name>
+        <value>10000</value>
+    </property>
+    <property>
+        <name>hive.server2.thrift.bind.host</name>
+        <value>0.0.0.0</value>
+    </property>
        <property>
                <name>hive.execution.engine</name>
                <value>tez</value>
@@ -55,5 +80,30 @@
         <name>hive.stats.autogather</name>
         <value>false</value>
     </property>
+    <!-- Enable ACID/transactional tables by default -->
+    <property>
+        <name>hive.support.concurrency</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>hive.txn.manager</name>
+        <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+    </property>
+    <property>
+        <name>hive.compactor.initiator.on</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>hive.compactor.worker.threads</name>
+        <value>1</value>
+    </property>
+    <property>
+        <name>hive.enforce.bucketing</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>hive.exec.dynamic.partition.mode</name>
+        <value>nonstrict</value>
+    </property>
 
 </configuration>


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to