From 39a7e1860778bc4ed76d415ee0b04dd9fd888de2 Mon Sep 17 00:00:00 2001
From: Amit Kapila <akapila@postgresql.org>
Date: Mon, 2 Sep 2019 12:36:21 +0530
Subject: [PATCH] Forbid Limit node to shut down resources.

As part of commit 19df1702f5, we allowed a Limit node to shut down
resources so that parallel nodes beneath it would report correct
stats.  The idea was sound, but the check could not reliably identify
the end of execution and ended up destroying the parallel context,
which is required for rescans.  The correct fix is to determine
whether rescans are possible and shut down resources only when they
are not, but that would require new infrastructure which we can't
back-patch.  So, for now, just forbid the Limit node from shutting
down resources.  This reintroduces the problem of incorrect stats
when there are parallel nodes beneath a Limit node, but it is still
better to let such queries run to completion and give correct
results.
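
A sketch of that idea, for illustration only (EXEC_FLAG_REWIND exists
today, but it is merely an optimization hint and does not cover every
rescan path, which is why proper support needs new infrastructure):

    /* Shut down only if neither backward fetch nor rescan is expected. */
    if (!(node->ps.state->es_top_eflags &
          (EXEC_FLAG_BACKWARD | EXEC_FLAG_REWIND)))
        (void) ExecShutdownNode(outerPlan);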

Reported-by: Jerry Sievers
Diagnosed-by: Thomas Munro
Author: Amit Kapila, testcase by Vignesh C
Backpatch-through: 9.6
Discussion: https://postgr.es/m/87ims2amh6.fsf@jsievers.enova.com
---
 src/backend/executor/nodeLimit.c              |  8 ------
 src/test/regress/expected/select_parallel.out | 41 ++++++++++++++++++++++++++-
 src/test/regress/sql/select_parallel.sql      | 22 +++++++++++++-
 3 files changed, 61 insertions(+), 10 deletions(-)
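
For context (commentary only, not applied by git am): the destruction
happens because ExecShutdownNode() recurses to the Gather or Gather
Merge node, whose shutdown callback frees the parallel machinery,
roughly as in nodeGatherMerge.c:

    void
    ExecShutdownGatherMerge(GatherMergeState *node)
    {
        ExecShutdownGatherMergeWorkers(node);

        /* Destroys the parallel context -- fatal to a later rescan. */
        if (node->pei != NULL)
        {
            ExecParallelCleanup(node->pei);
            node->pei = NULL;
        }
    }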

diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index baa669a..d21b5ce 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -134,14 +134,6 @@ ExecLimit(PlanState *pstate)
 					node->position - node->offset >= node->count)
 				{
 					node->lstate = LIMIT_WINDOWEND;
-
-					/*
-					 * If we know we won't need to back up, we can release
-					 * resources at this point.
-					 */
-					if (!(node->ps.state->es_top_eflags & EXEC_FLAG_BACKWARD))
-						(void) ExecShutdownNode(outerPlan);
-
 					return NULL;
 				}
 
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 04aecef..b11aee5 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -469,9 +469,48 @@ select * from
   9000 | 3
 (3 rows)
 
-reset enable_material;
+-- test rescans for a Limit node with a parallel node beneath it.
 reset enable_seqscan;
+set enable_indexonlyscan to off;
+set enable_indexscan to off;
+alter table tenk1 set (parallel_workers = 0);
+alter table tenk2 set (parallel_workers = 1);
+explain (costs off)
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+                         QUERY PLAN                         
+------------------------------------------------------------
+ Aggregate
+   ->  Nested Loop Left Join
+         Join Filter: (tenk1.unique1 < (tenk2.unique1 + 1))
+         ->  Seq Scan on tenk1
+               Filter: (unique1 < 2)
+         ->  Limit
+               ->  Gather Merge
+                     Workers Planned: 1
+                     ->  Sort
+                           Sort Key: tenk2.unique1
+                           ->  Parallel Seq Scan on tenk2
+(11 rows)
+
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+ count 
+-------
+  1999
+(1 row)
+
+-- reset parallel_workers for each table to its value from before this test.
+alter table tenk1 set (parallel_workers = 4);
+alter table tenk2 reset (parallel_workers);
+reset enable_material;
 reset enable_bitmapscan;
+reset enable_indexonlyscan;
+reset enable_indexscan;
 -- test parallel bitmap heap scan.
 set enable_seqscan to off;
 set enable_indexscan to off;
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 2c05661..5cd0725 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -173,9 +173,29 @@ select * from
   (select count(*) from tenk1 where thousand > 99) ss
   right join (values (1),(2),(3)) v(x) on true;
 
-reset enable_material;
+-- test rescans for a Limit node with a parallel node beneath it.
 reset enable_seqscan;
+set enable_indexonlyscan to off;
+set enable_indexscan to off;
+alter table tenk1 set (parallel_workers = 0);
+alter table tenk2 set (parallel_workers = 1);
+explain (costs off)
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+select count(*) from tenk1
+  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
+  on tenk1.unique1 < ss.unique1 + 1
+  where tenk1.unique1 < 2;
+-- reset parallel_workers for each table to its value from before this test.
+alter table tenk1 set (parallel_workers = 4);
+alter table tenk2 reset (parallel_workers);
+
+reset enable_material;
 reset enable_bitmapscan;
+reset enable_indexonlyscan;
+reset enable_indexscan;
 
 -- test parallel bitmap heap scan.
 set enable_seqscan to off;
-- 
1.8.3.1

