This is an automated email from the ASF dual-hosted git repository.

yjhjstz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry.git

commit 8425ed16b0e54c42dfb382c6c3120d83dc33e3f2
Author: Adam Lee <[email protected]>
AuthorDate: Thu Aug 25 16:07:44 2022 +0800

    Remove a redundant case from workfile_limits (#13998)
    
    The commit below added a case to test the GUC 
gp_workfile_limit_files_per_query,
    but such a case already exists, and the newly added one takes far too much time.
    
            commit 209694154bdc5797ea66b2116dcd82fc9454e593
            Author: zwenlin <[email protected]>
            Date:   Fri Mar 25 19:14:13 2022 +0800
    
                Remove gpdb_12_merge_fixme in buffile.c.
    
                PostgreSQL breaks temporary files into 1 GB segments. Greenplum 
didn't
                do that until v12 merge greenplum-db/gpdb@19cd1cf breaks 
BufFiles into
                segments and counts each segment file as one work file.
    
                The GUC gp_workfile_limit_files_per_query is used to control 
the maximum
                number of spill files for a given query, to prevent runaway 
queries from
                destroying the entire system. Counting each segment file is 
reasonable
                for this scenario.
    
                This PR removes the FIXME of worrying about the count method 
and adds a test.
    
    This commit removes the newly added case.
---
 src/test/regress/expected/workfile_limits.out | 24 ------------------------
 src/test/regress/sql/workfile_limits.sql      | 21 ---------------------
 2 files changed, 45 deletions(-)

diff --git a/src/test/regress/expected/workfile_limits.out 
b/src/test/regress/expected/workfile_limits.out
index 99ce7ff8db..947f8943af 100644
--- a/src/test/regress/expected/workfile_limits.out
+++ b/src/test/regress/expected/workfile_limits.out
@@ -36,30 +36,6 @@ union
 select count(g) from generate_series(1, 500000) g
 order by 1;
 ERROR:  number of workfiles per query limit exceeded
--- Test work file limit number after merge PG 1GB segment
--- Ensure the queries below need to spill to disk.
-set statement_mem='1 MB';
--- Also test limit on number of files (gp_workfile_limit_files_per_query)
--- The query below will generate 6 temp files:
--- 1 gpadmin gpadmin 1.0G Mar 25 23:13 pgsql_tmpLogicalTape16802.3
--- 1 gpadmin gpadmin 1.0G Mar 25 23:13 pgsql_tmpLogicalTape16802.4
--- 1 gpadmin gpadmin 247M Mar 25 23:13 pgsql_tmpLogicalTape16802.5
--- 1 gpadmin gpadmin 1.0G Mar 25 23:08 pgsql_tmpslice0_tuplestore16802.0
--- 1 gpadmin gpadmin 1.0G Mar 25 23:09 pgsql_tmpslice0_tuplestore16802.1
--- 1 gpadmin gpadmin 623M Mar 25 23:09 pgsql_tmpslice0_tuplestore16802.2
--- On GP6, the query will generate 2 temp files:
--- 1 gpadmin gpadmin 1.5G Mar 24 22:50 pgsql_tmp_slice-1_tuplestore_1_3251.16
--- 1 gpadmin gpadmin 2.2G Mar 24 22:54 pgsql_tmp_Sort_2_3251.17
--- On master, each 1 GB segment file count as work file, and the 
work_set->perquery->num_files count as 6.
-set gp_workfile_limit_files_per_query='6';
-select count(distinct g) from generate_series(1, 200000000) g;
-   count   
------------
- 200000000
-(1 row)
-
-reset gp_workfile_limit_files_per_query;
-reset statement_mem;
 -- We cannot test the per-segment limit, because changing it requires
 -- a postmaster restart. It's enforced in the same way as the per-query
 -- limit, though, and it's simpler, so if the per-query limit works,
diff --git a/src/test/regress/sql/workfile_limits.sql 
b/src/test/regress/sql/workfile_limits.sql
index e7f3b3bf28..54cd1d2d55 100644
--- a/src/test/regress/sql/workfile_limits.sql
+++ b/src/test/regress/sql/workfile_limits.sql
@@ -31,27 +31,6 @@ union
 select count(g) from generate_series(1, 500000) g
 order by 1;
 
--- Test work file limit number after merge PG 1GB segment
-
--- Ensure the queries below need to spill to disk.
-set statement_mem='1 MB';
--- Also test limit on number of files (gp_workfile_limit_files_per_query)
--- The query below will generate 6 temp files:
--- 1 gpadmin gpadmin 1.0G Mar 25 23:13 pgsql_tmpLogicalTape16802.3
--- 1 gpadmin gpadmin 1.0G Mar 25 23:13 pgsql_tmpLogicalTape16802.4
--- 1 gpadmin gpadmin 247M Mar 25 23:13 pgsql_tmpLogicalTape16802.5
--- 1 gpadmin gpadmin 1.0G Mar 25 23:08 pgsql_tmpslice0_tuplestore16802.0
--- 1 gpadmin gpadmin 1.0G Mar 25 23:09 pgsql_tmpslice0_tuplestore16802.1
--- 1 gpadmin gpadmin 623M Mar 25 23:09 pgsql_tmpslice0_tuplestore16802.2
--- On GP6, the query will generate 2 temp files:
--- 1 gpadmin gpadmin 1.5G Mar 24 22:50 pgsql_tmp_slice-1_tuplestore_1_3251.16
--- 1 gpadmin gpadmin 2.2G Mar 24 22:54 pgsql_tmp_Sort_2_3251.17
--- On master, each 1 GB segment file count as work file, and the 
work_set->perquery->num_files count as 6.
-set gp_workfile_limit_files_per_query='6';
-select count(distinct g) from generate_series(1, 200000000) g;
-
-reset gp_workfile_limit_files_per_query;
-reset statement_mem;
 
 -- We cannot test the per-segment limit, because changing it requires
 -- a postmaster restart. It's enforced in the same way as the per-query


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to