From d5871edc4daaa5218aa627059afaa65ba39ea767 Mon Sep 17 00:00:00 2001
From: Thomas Munro <thomas.munro@enterprisedb.com>
Date: Mon, 15 Jan 2018 15:34:35 +1300
Subject: [PATCH] Reduce the size of the tables used in the hash join
 regression tests.

Halve the row counts of the test tables (20,000 -> 10,000) and reduce
the work_mem settings used by these tests from 128kB/192kB to 64kB, so
that the same multi-batch hash join paths are still exercised with
less data.

Thomas Munro, per complaint from Tom Lane.
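
A quick way to confirm that the smaller tables still spill to multiple
batches at the reduced work_mem is to check the Hash node's "Batches"
figure in EXPLAIN (ANALYZE) output, for example:

  begin;
  set local max_parallel_workers_per_gather = 0;
  set local work_mem = '64kB';
  -- The Hash node should report "Batches: N" with N > 1.
  explain (analyze, costs off, timing off, summary off)
    select count(*) from simple r join simple s using (id);
  rollback;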
---
 src/test/regress/expected/join.out | 60 +++++++++++++++++++-------------------
 src/test/regress/sql/join.sql      | 28 +++++++++---------
 2 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 02e7d56e550..5f705b017c0 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -5860,27 +5860,27 @@ $$;
 -- Make a simple relation with well distributed keys and correctly
 -- estimated size.
 create table simple as
-  select generate_series(1, 20000) AS id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
+  select generate_series(1, 10000) AS id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
 alter table simple set (parallel_workers = 2);
 analyze simple;
 -- Make a relation whose size we will under-estimate.  We want stats
--- to say 1000 rows, but actually there are 20,000 rows.
+-- to say 1000 rows, but actually there are 10,000 rows.
 create table bigger_than_it_looks as
-  select generate_series(1, 20000) as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
+  select generate_series(1, 10000) as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
 alter table bigger_than_it_looks set (autovacuum_enabled = 'false');
 alter table bigger_than_it_looks set (parallel_workers = 2);
 analyze bigger_than_it_looks;
 update pg_class set reltuples = 1000 where relname = 'bigger_than_it_looks';
 -- Make a relation whose size we underestimate and that also has a
 -- kind of skew that breaks our batching scheme.  We want stats to say
--- 2 rows, but actually there are 20,000 rows with the same key.
+-- 2 rows, but actually there are 10,000 rows with the same key.
 create table extremely_skewed (id int, t text);
 alter table extremely_skewed set (autovacuum_enabled = 'false');
 alter table extremely_skewed set (parallel_workers = 2);
 analyze extremely_skewed;
 insert into extremely_skewed
   select 42 as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
-  from generate_series(1, 20000);
+  from generate_series(1, 10000);
 update pg_class
   set reltuples = 2, relpages = pg_relation_size('extremely_skewed') / 8192
   where relname = 'extremely_skewed';
@@ -5909,7 +5909,7 @@ explain (costs off)
 select count(*) from simple r join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -5946,7 +5946,7 @@ explain (costs off)
 select count(*) from simple r join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -5983,7 +5983,7 @@ explain (costs off)
 select count(*) from simple r join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -6003,7 +6003,7 @@ rollback to settings;
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 explain (costs off)
   select count(*) from simple r join simple s using (id);
                QUERY PLAN               
@@ -6019,7 +6019,7 @@ explain (costs off)
 select count(*) from simple r join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -6036,7 +6036,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -6056,7 +6056,7 @@ explain (costs off)
 select count(*) from simple r join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -6073,7 +6073,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '192kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -6093,7 +6093,7 @@ explain (costs off)
 select count(*) from simple r join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -6114,7 +6114,7 @@ rollback to settings;
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 explain (costs off)
   select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
                       QUERY PLAN                      
@@ -6130,7 +6130,7 @@ explain (costs off)
 select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -6147,7 +6147,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -6167,7 +6167,7 @@ explain (costs off)
 select count(*) from simple r join bigger_than_it_looks s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -6184,7 +6184,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '192kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -6204,7 +6204,7 @@ explain (costs off)
 select count(*) from simple r join bigger_than_it_looks s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select original > 1 as initially_multibatch, final > original as increased_batches
@@ -6226,7 +6226,7 @@ rollback to settings;
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
                     QUERY PLAN                    
@@ -6242,7 +6242,7 @@ explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select * from hash_join_batches(
@@ -6258,7 +6258,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
@@ -6277,7 +6277,7 @@ explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select * from hash_join_batches(
@@ -6293,7 +6293,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
@@ -6313,7 +6313,7 @@ explain (costs off)
 select count(*) from simple r join extremely_skewed s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 select * from hash_join_batches(
@@ -6571,7 +6571,7 @@ explain (costs off)
 select  count(*) from simple r full outer join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 rollback to settings;
@@ -6593,7 +6593,7 @@ explain (costs off)
 select  count(*) from simple r full outer join simple s using (id);
  count 
 -------
- 20000
+ 10000
 (1 row)
 
 rollback to settings;
@@ -6616,7 +6616,7 @@ explain (costs off)
 select  count(*) from simple r full outer join simple s on (r.id = 0 - s.id);
  count 
 -------
- 40000
+ 20000
 (1 row)
 
 rollback to settings;
@@ -6638,7 +6638,7 @@ explain (costs off)
 select  count(*) from simple r full outer join simple s on (r.id = 0 - s.id);
  count 
 -------
- 40000
+ 20000
 (1 row)
 
 rollback to settings;
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index dd62c38c15e..149ddf4c23d 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -2001,14 +2001,14 @@ $$;
 -- Make a simple relation with well distributed keys and correctly
 -- estimated size.
 create table simple as
-  select generate_series(1, 20000) AS id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
+  select generate_series(1, 10000) AS id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
 alter table simple set (parallel_workers = 2);
 analyze simple;
 
 -- Make a relation whose size we will under-estimate.  We want stats
--- to say 1000 rows, but actually there are 20,000 rows.
+-- to say 1000 rows, but actually there are 10,000 rows.
 create table bigger_than_it_looks as
-  select generate_series(1, 20000) as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
+  select generate_series(1, 10000) as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa';
 alter table bigger_than_it_looks set (autovacuum_enabled = 'false');
 alter table bigger_than_it_looks set (parallel_workers = 2);
 analyze bigger_than_it_looks;
@@ -2016,14 +2016,14 @@ update pg_class set reltuples = 1000 where relname = 'bigger_than_it_looks';
 
 -- Make a relation whose size we underestimate and that also has a
 -- kind of skew that breaks our batching scheme.  We want stats to say
--- 2 rows, but actually there are 20,000 rows with the same key.
+-- 2 rows, but actually there are 10,000 rows with the same key.
 create table extremely_skewed (id int, t text);
 alter table extremely_skewed set (autovacuum_enabled = 'false');
 alter table extremely_skewed set (parallel_workers = 2);
 analyze extremely_skewed;
 insert into extremely_skewed
   select 42 as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
-  from generate_series(1, 20000);
+  from generate_series(1, 10000);
 update pg_class
   set reltuples = 2, relpages = pg_relation_size('extremely_skewed') / 8192
   where relname = 'extremely_skewed';
@@ -2087,7 +2087,7 @@ rollback to settings;
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 explain (costs off)
   select count(*) from simple r join simple s using (id);
 select count(*) from simple r join simple s using (id);
@@ -2101,7 +2101,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -2116,7 +2116,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '192kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -2136,7 +2136,7 @@ rollback to settings;
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 explain (costs off)
   select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
 select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
@@ -2150,7 +2150,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -2165,7 +2165,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '192kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -2186,7 +2186,7 @@ rollback to settings;
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
 select count(*) from simple r join extremely_skewed s using (id);
@@ -2199,7 +2199,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
@@ -2213,7 +2213,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '128kB';
+set local work_mem = '64kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
-- 
2.15.1

