This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new f8ed559eb50 Fix some answer files
f8ed559eb50 is described below

commit f8ed559eb506ac18e2ec979675b48f296d8594e2
Author: Jinbao Chen <[email protected]>
AuthorDate: Fri Jan 2 16:48:37 2026 +0800

    Fix some answer files
---
 src/backend/access/common/reloptions.c             | 13 +++---
 src/test/regress/expected/analyze.out              |  9 +++--
 src/test/regress/expected/appendonly.out           | 12 +++---
 src/test/regress/expected/domain.out               |  2 +-
 .../expected/index_constraint_naming_partition.out |  7 ++--
 src/test/regress/expected/sreh.out                 | 47 +++++++++++++---------
 src/test/regress/sql/appendonly.sql                | 12 +++---
 src/test/regress/sql/event_trigger_gp.sql          |  6 +--
 src/test/regress/sql/sreh.sql                      | 24 ++++++-----
 9 files changed, 76 insertions(+), 56 deletions(-)

diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 5d4bc7c07a4..889b3c6628a 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -2031,12 +2031,13 @@ build_local_reloptions(local_relopts *relopts, Datum options, bool validate)
 bytea *
 partitioned_table_reloptions(Datum reloptions, bool validate)
 {
-       if (validate && reloptions)
-               ereport(ERROR,
-                               errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                               errmsg("cannot specify storage parameters for a partitioned table"),
-                               errhint("Specify storage parameters for its leaf partitions instead."));
-       return NULL;
+       /*
+        * There are no options for partitioned tables yet, but this is able to do
+        * some validation.
+        */
+       return (bytea *) build_reloptions(reloptions, validate,
+                                                                         RELOPT_KIND_PARTITIONED,
+                                                                         0, NULL, 0);
 }
 
 /*
diff --git a/src/test/regress/expected/analyze.out b/src/test/regress/expected/analyze.out
index 843a728c9b6..294cc4daa16 100644
--- a/src/test/regress/expected/analyze.out
+++ b/src/test/regress/expected/analyze.out
@@ -1292,10 +1292,12 @@ set optimizer_analyze_root_partition=on;
 set optimizer_analyze_midlevel_partition=off;
 analyze verbose p1;
 INFO:  analyzing "public.p1"
-INFO:  Executing SQL: select pg_catalog.gp_acquire_sample_rows(24981, 10000, 'f');
+INFO:  skipping analyze of "public.p1" inheritance tree --- this inheritance tree contains no child tables
+INFO:  Executing SQL: select pg_catalog.gp_acquire_sample_rows(56435, 10000, 'f');
 analyze verbose p2;
 INFO:  analyzing "public.p2"
-INFO:  Executing SQL: select pg_catalog.gp_acquire_sample_rows(24984, 10000, 'f');
+INFO:  skipping analyze of "public.p2" inheritance tree --- this inheritance tree contains no child tables
+INFO:  Executing SQL: select pg_catalog.gp_acquire_sample_rows(56438, 10000, 'f');
 select * from pg_stats where tablename like 'part2';
 schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram 
------------+-----------+---------+-----------+-----------+-----------+------------+------------------+-------------------+------------------+-------------+-------------------+------------------------+----------------------
@@ -1304,7 +1306,8 @@ select * from pg_stats where tablename like 'part2';
 set optimizer_analyze_midlevel_partition=on;
 analyze verbose p2;
 INFO:  analyzing "public.p2"
-INFO:  Executing SQL: select pg_catalog.gp_acquire_sample_rows(24984, 10000, 'f');
+INFO:  skipping analyze of "public.p2" inheritance tree --- this inheritance tree contains no child tables
+INFO:  Executing SQL: select pg_catalog.gp_acquire_sample_rows(56438, 10000, 'f');
 INFO:  analyzing "public.part2" inheritance tree
INFO:  Executing SQL: select pg_catalog.gp_acquire_sample_rows(24975, 10000, 't');
 select * from pg_stats where tablename like 'part2';
diff --git a/src/test/regress/expected/appendonly.out b/src/test/regress/expected/appendonly.out
index 0ed98a3d4b4..a8d2775798b 100644
--- a/src/test/regress/expected/appendonly.out
+++ b/src/test/regress/expected/appendonly.out
@@ -176,12 +176,14 @@ $$ language sql;
 -------------------- 
 -- supported sql
 --------------------
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set tenk_data :abs_srcdir '/data/tenk.data'
 -- COPY
-COPY tenk_heap FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao1 FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao2 FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao3 FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao4 FROM '@abs_srcdir@/data/tenk.data';
+COPY tenk_heap FROM :'tenk_data';
+COPY tenk_ao1 FROM :'tenk_data';
+COPY tenk_ao2 FROM :'tenk_data';
+COPY tenk_ao3 FROM :'tenk_data';
+COPY tenk_ao4 FROM :'tenk_data';
 ANALYZE tenk_heap;
 ANALYZE tenk_ao1;
 ANALYZE tenk_ao2;
diff --git a/src/test/regress/expected/domain.out b/src/test/regress/expected/domain.out
index 4f539fa5511..ed0e3bf117d 100644
--- a/src/test/regress/expected/domain.out
+++ b/src/test/regress/expected/domain.out
@@ -476,7 +476,7 @@ explain (verbose, costs off)
  Update on public.dcomptable
    ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)
          Output: (((d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision))::dcomptypea), ctid, gp_segment_id, (DMLAction)
-         ->  Split
+         ->  Split Update
                Output: (((d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision))::dcomptypea), ctid, gp_segment_id, DMLAction
                ->  Seq Scan on public.dcomptable
                      Output: (d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision), ctid, gp_segment_id
diff --git a/src/test/regress/expected/index_constraint_naming_partition.out b/src/test/regress/expected/index_constraint_naming_partition.out
index a934d8c406c..c66bddac7e3 100755
--- a/src/test/regress/expected/index_constraint_naming_partition.out
+++ b/src/test/regress/expected/index_constraint_naming_partition.out
@@ -178,9 +178,10 @@ CREATE FUNCTION recreate_two_level_table() RETURNS VOID
 $fn$;
 -- validate that there are no constraints when we start
 SELECT * FROM partition_tables_show_all('r');
-ERROR:  relation "r" does not exist
-CONTEXT:  SQL function "partition_tables" statement 1
-SQL function "partition_tables_show_all" statement 1
+ partition_name | parent_name | root_name | constraint_name | index_name | constraint_type 
+----------------+-------------+-----------+-----------------+------------+-----------------
+(0 rows)
+
 -- UNIQUE constraint: validate we correctly add it and can only drop from root
 SELECT recreate_two_level_table();
 NOTICE:  table "r" does not exist, skipping
diff --git a/src/test/regress/expected/sreh.out b/src/test/regress/expected/sreh.out
index 99c870d952a..ba51f288590 100755
--- a/src/test/regress/expected/sreh.out
+++ b/src/test/regress/expected/sreh.out
@@ -19,7 +19,9 @@ CREATE TABLE sreh_copy(a int, b int, c int) distributed by(a);
 -- 
 -- ROW reject limit only
 --
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' SEGMENT REJECT LIMIT 1000;
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set bad_data1 :abs_srcdir '/data/bad_data1.data'
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' SEGMENT REJECT LIMIT 1000;
NOTICE:  found 10 data formatting errors (10 or more input rows), rejected related input data
 SELECT * FROM sreh_copy ORDER BY a,b,c;
  a  | b  | c  
@@ -35,7 +37,7 @@ SELECT * FROM sreh_copy ORDER BY a,b,c;
 -- 
 -- ROW reject limit only - low value that gets reached
 --
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' SEGMENT REJECT LIMIT 2;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' SEGMENT REJECT LIMIT 2;
 ERROR:  segment reject limit reached, aborting operation
DETAIL:  Last error was: invalid input syntax for type integer: "eleven", column a
 CONTEXT:  COPY sreh_copy, line 11, column a: "eleven"
@@ -54,13 +56,13 @@ SELECT * FROM sreh_copy ORDER BY a,b,c;
 -- error logs
 --
 DROP TABLE IF EXISTS sreh_copy; CREATE TABLE sreh_copy(a int, b int, c int) distributed by(a);
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
 ERROR:  error table is not supported
 LINE 1: ...ess/data/bad_data1.data' DELIMITER '|' LOG ERRORS INTO WHATE...
                                                              ^
HINT:  Set gp_ignore_error_table to ignore the [INTO error-table] clause for backward compatibility.
 SET gp_ignore_error_table=true;
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
 WARNING:  error table is not supported
HINT:  Use gp_read_error_log() and gp_truncate_error_log() to view and manage the internal error log associated with your table.
NOTICE:  found 10 data formatting errors (10 or more input rows), rejected related input data
@@ -84,7 +86,7 @@ WITH error_log AS (SELECT gp_read_error_log('sreh_copy')) select count(*) from e
 --
 -- error logs - do the same thing again. this time error logs exist and should get data appended
 --
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' LOG ERRORS SEGMENT REJECT LIMIT 1000;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' LOG ERRORS SEGMENT REJECT LIMIT 1000;
NOTICE:  found 10 data formatting errors (10 or more input rows), rejected related input data
 SELECT * FROM sreh_copy ORDER BY a,b,c;
  a  | b  | c  
@@ -196,10 +198,11 @@ SELECT * FROM sreh_constr; -- should exist and be empty
 -- so the percent calculation should always be the same regardless of number of
 -- QE's in the system.
 --
+\set bad_data3 :abs_srcdir '/data/bad_data3.data'
 set gp_reject_percent_threshold = 100;
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --pass
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --pass
NOTICE:  found 9 data formatting errors (9 or more input rows), rejected related input data
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 2 PERCENT; --fail
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 2 PERCENT; --fail
 ERROR:  segment reject limit reached, aborting operation
 DETAIL:  Last error was: invalid input syntax for type integer: "BAD", column a
 CONTEXT:  COPY sreh_copy, line 107, column a
@@ -207,11 +210,11 @@ CONTEXT:  COPY sreh_copy, line 107, column a
 -- test PERCENT reject limit logic with custom threshold 10 (only practical for test purposes)
 --
 set gp_reject_percent_threshold = 10;
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --fail
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --fail
 ERROR:  segment reject limit reached, aborting operation
 DETAIL:  Last error was: invalid input syntax for type integer: "BAD", column a
 CONTEXT:  COPY sreh_copy, line 15, column a
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 20 PERCENT; --pass
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 20 PERCENT; --pass
NOTICE:  found 9 data formatting errors (9 or more input rows), rejected related input data
 -- MPP-2933 (multiple dist-key attr conversion errors)
 create table t2933 (col1 varchar(3) NULL , col2 char(1) NULL, col3 varchar(4) NULL, col4 char(1) NULL, col5 varchar(20) NULL) 
@@ -224,8 +227,10 @@ DROP TABLE sreh_constr;
 -- ###########################################################
 -- External Tables 
 -- ###########################################################
+\getenv binddir PG_BINDDIR
+\set gpfdist_sreh_start_e '((' :binddir '/gpfdist -p 8080 -d ' :abs_srcdir '/data  </dev/null >/dev/null 2>&1 &); for i in `seq 1 30`; do curl 127.0.0.1:8080 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
 CREATE EXTERNAL WEB TABLE gpfdist_sreh_start (x text)
-execute E'((@bindir@/gpfdist -p 8080 -d @abs_srcdir@/data  </dev/null >/dev/null 2>&1 &); for i in `seq 1 30`; do curl 127.0.0.1:8080 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
+execute E:'gpfdist_sreh_start_e'
 on MASTER
 FORMAT 'text' (delimiter '|');
 CREATE EXTERNAL WEB TABLE gpfdist_sreh_stop (x text)
@@ -249,8 +254,10 @@ CREATE TABLE sreh_target(a int, b int, c int) distributed by(a);
 -- 
 -- reject limit only
 --
+\getenv hostname PG_HOSTNAME
+\set bad_data1 'gpfdist://' :hostname ':8080/bad_data1.data'
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 10000;
 SELECT * FROM sreh_ext;
@@ -279,7 +286,7 @@ DROP EXTERNAL TABLE sreh_ext;
 -- reject limit only - low value that gets reached
 --
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 2;
 SELECT * FROM sreh_ext ORDER BY a;
@@ -302,7 +309,7 @@ DROP EXTERNAL TABLE sreh_ext;
 -- error logs
 --
 CREATE EXTERNAL TABLE sreh_ext_err_tbl(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 LOG ERRORS INTO WHATEVER
 SEGMENT REJECT LIMIT 1000;
@@ -321,8 +328,9 @@ NOTICE:  found 10 data formatting errors (10 or more input rows), rejected relat
 (6 rows)
 
 -- Verify the fields that we easily can.
+\set bad_data1_like 'gpfdist://' :hostname ':8080/bad_data1.data [%]'
 WITH error_log AS (SELECT * FROM gp_read_error_log('sreh_ext_err_tbl'))
-  SELECT relname, filename LIKE 'gpfdist://@hostname@:8080/bad_data1.data [%]' as filename_ok, linenum, errmsg from error_log;
+  SELECT relname, filename LIKE :'bad_data1_like' as filename_ok, linenum, errmsg from error_log;
     relname      | filename_ok | linenum |                           errmsg                            
------------------+-------------+---------+------------------------------------------------------------
  sreh_ext_err_tbl | t           |       2 | missing data for column "b"
@@ -338,7 +346,7 @@ WITH error_log AS (SELECT * FROM gp_read_error_log('sreh_ext_err_tbl'))
 (10 rows)
 
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 LOG ERRORS
 SEGMENT REJECT LIMIT 1000;
@@ -437,7 +445,7 @@ CREATE TABLE sreh_constr(a int, b int, c int check (c < 10));
NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 LOG ERRORS
 SEGMENT REJECT LIMIT 1000;
@@ -464,8 +472,9 @@ SELECT COUNT(*) FROM sreh_constr; -- should be empty
 -- should always be the same regardless of number of QE's in the system.
 --
 set gp_reject_percent_threshold = 100;
+\set bad_data3 'gpfdist://' :hostname ':8080/bad_data3.data'
 CREATE EXTERNAL TABLE sreh_ext_10percent(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data3.data' )
+LOCATION (:'bad_data3' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 10 PERCENT;
 SELECT count(*) FROM sreh_ext_10percent; -- pass
@@ -476,7 +485,7 @@ NOTICE:  found 9 data formatting errors (9 or more input rows), rejected related
 (1 row)
 
 CREATE EXTERNAL TABLE sreh_ext_2percent(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data3.data' )
+LOCATION (:'bad_data3' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 2 PERCENT;
 SELECT count(*) FROM sreh_ext_2percent; -- fail
@@ -492,7 +501,7 @@ ERROR:  segment reject limit reached, aborting operation
DETAIL:  Last error was: invalid input syntax for type integer: "BAD", column a  (seg0 slice1 @hostname@:11001 pid=68456)
CONTEXT:  External table sreh_ext_10percent, line 15 of gpfdist://@hostname@:8080/bad_data3.data, column a
 CREATE EXTERNAL TABLE sreh_ext_20percent(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data3.data' )
+LOCATION (:'bad_data3' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 20 PERCENT;
 SELECT count(*) FROM sreh_ext_20percent; -- pass
diff --git a/src/test/regress/sql/appendonly.sql b/src/test/regress/sql/appendonly.sql
index 7a824f3d0e6..2d5f3f4b64e 100644
--- a/src/test/regress/sql/appendonly.sql
+++ b/src/test/regress/sql/appendonly.sql
@@ -129,12 +129,14 @@ $$ language sql;
 -- supported sql
 --------------------
 
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set tenk_data :abs_srcdir '/data/tenk.data'
 -- COPY
-COPY tenk_heap FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao1 FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao2 FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao3 FROM '@abs_srcdir@/data/tenk.data';
-COPY tenk_ao4 FROM '@abs_srcdir@/data/tenk.data';
+COPY tenk_heap FROM :'tenk_data';
+COPY tenk_ao1 FROM :'tenk_data';
+COPY tenk_ao2 FROM :'tenk_data';
+COPY tenk_ao3 FROM :'tenk_data';
+COPY tenk_ao4 FROM :'tenk_data';
 ANALYZE tenk_heap;
 ANALYZE tenk_ao1;
 ANALYZE tenk_ao2;
diff --git a/src/test/regress/sql/event_trigger_gp.sql b/src/test/regress/sql/event_trigger_gp.sql
index 5b3aaec7f59..fd7aa22892a 100644
--- a/src/test/regress/sql/event_trigger_gp.sql
+++ b/src/test/regress/sql/event_trigger_gp.sql
@@ -1,5 +1,3 @@
-\getenv libdir PG_LIBDIR
-\set gpextprotocol :libdir '/gpextprotocol.so'
 create or replace function test_event_trigger() returns event_trigger as $$
 BEGIN
     RAISE NOTICE 'test_event_trigger: % %', tg_event, tg_tag;
@@ -13,8 +11,8 @@ create event trigger regress_event_trigger on ddl_command_start
 CREATE EXTERNAL WEB TABLE echotest (x text) EXECUTE 'echo foo;' FORMAT 'text';
 DROP EXTERNAL TABLE echotest;
 
-CREATE OR REPLACE FUNCTION write_to_file() RETURNS integer as :'gpextprotocol', 'demoprot_export' LANGUAGE C STABLE NO SQL;
-CREATE OR REPLACE FUNCTION read_from_file() RETURNS integer as :'gpextprotocol', 'demoprot_import' LANGUAGE C STABLE NO SQL;
+CREATE OR REPLACE FUNCTION write_to_file() RETURNS integer as '$libdir/gpextprotocol.so', 'demoprot_export' LANGUAGE C STABLE NO SQL;
+CREATE OR REPLACE FUNCTION read_from_file() RETURNS integer as '$libdir/gpextprotocol.so', 'demoprot_import' LANGUAGE C STABLE NO SQL;
 
 CREATE PROTOCOL demoprot_event_trig_test (readfunc = 'read_from_file', writefunc = 'write_to_file');
 
diff --git a/src/test/regress/sql/sreh.sql b/src/test/regress/sql/sreh.sql
index c722fd2ee11..f5a32b85238 100755
--- a/src/test/regress/sql/sreh.sql
+++ b/src/test/regress/sql/sreh.sql
@@ -130,8 +130,9 @@ DROP TABLE sreh_constr;
 -- External Tables 
 -- ###########################################################
 \getenv binddir PG_BINDDIR
+\set gpfdist_sreh_start_e '((' :binddir '/gpfdist -p 8080 -d ' :abs_srcdir '/data  </dev/null >/dev/null 2>&1 &); for i in `seq 1 30`; do curl 127.0.0.1:8080 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
 CREATE EXTERNAL WEB TABLE gpfdist_sreh_start (x text)
-execute E'(('||:'binddir'||'/gpfdist -p 8080 -d '||:'abs_srcdir'||'/data  </dev/null >/dev/null 2>&1 &); for i in `seq 1 30`; do curl 127.0.0.1:8080 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
+execute E:'gpfdist_sreh_start_e'
 on MASTER
 FORMAT 'text' (delimiter '|');
 
@@ -150,8 +151,9 @@ CREATE TABLE sreh_target(a int, b int, c int) distributed by(a);
 -- reject limit only
 --
 \getenv hostname PG_HOSTNAME
+\set bad_data1 'gpfdist://' :hostname ':8080/bad_data1.data'
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://'||'@hostname@'||':8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 10000;
 
@@ -165,7 +167,7 @@ DROP EXTERNAL TABLE sreh_ext;
 -- reject limit only - low value that gets reached
 --
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 2;
 
@@ -179,18 +181,19 @@ DROP EXTERNAL TABLE sreh_ext;
 -- error logs
 --
 CREATE EXTERNAL TABLE sreh_ext_err_tbl(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 LOG ERRORS INTO WHATEVER
 SEGMENT REJECT LIMIT 1000;
 
 SELECT * FROM sreh_ext_err_tbl ORDER BY a;
 -- Verify the fields that we easily can.
+\set bad_data1_like 'gpfdist://' :hostname ':8080/bad_data1.data [%]'
 WITH error_log AS (SELECT * FROM gp_read_error_log('sreh_ext_err_tbl'))
-  SELECT relname, filename LIKE 'gpfdist://@hostname@:8080/bad_data1.data [%]' as filename_ok, linenum, errmsg from error_log;
+  SELECT relname, filename LIKE :'bad_data1_like' as filename_ok, linenum, errmsg from error_log;
 
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 LOG ERRORS
 SEGMENT REJECT LIMIT 1000;
@@ -219,7 +222,7 @@ DROP EXTERNAL TABLE sreh_ext_err_tbl;
 --
 CREATE TABLE sreh_constr(a int, b int, c int check (c < 10));
 CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION (:'bad_data1' )
 FORMAT 'text' (delimiter '|')
 LOG ERRORS
 SEGMENT REJECT LIMIT 1000;
@@ -237,15 +240,16 @@ SELECT COUNT(*) FROM sreh_constr; -- should be empty
 -- should always be the same regardless of number of QE's in the system.
 --
 set gp_reject_percent_threshold = 100;
+\set bad_data3 'gpfdist://' :hostname ':8080/bad_data3.data'
 CREATE EXTERNAL TABLE sreh_ext_10percent(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data3.data' )
+LOCATION (:'bad_data3' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 10 PERCENT;
 
 SELECT count(*) FROM sreh_ext_10percent; -- pass
 
 CREATE EXTERNAL TABLE sreh_ext_2percent(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data3.data' )
+LOCATION (:'bad_data3' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 2 PERCENT;
 
@@ -258,7 +262,7 @@ set gp_reject_percent_threshold = 10;
 SELECT count(*) FROM sreh_ext_10percent; -- fail
 
 CREATE EXTERNAL TABLE sreh_ext_20percent(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data3.data' )
+LOCATION (:'bad_data3' )
 FORMAT 'text' (delimiter '|')
 SEGMENT REJECT LIMIT 20 PERCENT;
 

