On Mon, Feb 03, 2020 at 06:53:01AM -0800, Andres Freund wrote:
> On 2020-01-03 10:19:26 -0600, Justin Pryzby wrote:
> > On Sun, Feb 17, 2019 at 11:29:56AM -0500, Jeff Janes wrote:
> > https://www.postgresql.org/message-id/CAMkU%3D1zBJNVo2DGYBgLJqpu8fyjCE_ys%2Bmsr6pOEoiwA7y5jrA%40mail.gmail.com
> > > What would I find very useful is [...] if the HashAggregate node under
> > > "explain analyze" would report memory and bucket stats; and if the
> > > Aggregate node would report...anything.
> 
> Yea, that'd be amazing. It probably should be something every
> execGrouping.c using node can opt into.

Do you think it should be implemented in execGrouping/TupleHashTableData (as I
did)?  I also experimented with moving it into the higher-level nodes, but I
guess that's not actually desirable.  The tests currently give different output
between the implementation inside execGrouping.c and the one outside it, so
there's at least an issue with grouping sets.
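
For concreteness, here's roughly the shape of the instrumentation struct hung
off TupleHashTableData (a sketch only: the field names match what the explain.c
hunks below read, but the exact types and placement are approximate):

	typedef struct hash_instrumentation
	{
		size_t	nbuckets;			/* number of buckets at end of execution */
		size_t	nbuckets_original;	/* planned number of buckets */
		size_t	space_peak_hash;	/* peak bucket-array memory, in bytes */
		size_t	space_peak_tuples;	/* peak memory of stored tuples, in bytes */
	} hash_instrumentation;

	typedef struct TupleHashTableData
	{
		/* ... existing fields ... */
		hash_instrumentation instrument;	/* filled for EXPLAIN ANALYZE */
	} TupleHashTableData;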

> > +   hashtable->hinstrument.nbuckets_original = nbuckets;
> > +   hashtable->hinstrument.nbuckets = nbuckets;
> > +   hashtable->hinstrument.space_peak = entrysize * hashtable->hashtab->size;
> 
> That's not actually an accurate accounting of memory, because for filled
> entries a lot of memory is used to store actual tuples:

Thanks - I think I finally understood this.
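
To spell that out: entrysize * hashtab->size only measures the bucket array;
the tuples themselves are stored in the table's tablecxt and need to be
accounted separately.  The update looks roughly like this (a sketch; the helper
name is made up, and I'm reading the context's mem_allocated counter, the same
thing the commented-out fprintf further down pokes at):

	/* hypothetical helper, called after inserting into the hashtable */
	static void
	UpdateTupleHashTableStats(TupleHashTable hashtable)
	{
		hash_instrumentation *inst = &hashtable->instrument;

		inst->nbuckets = hashtable->hashtab->size;
		/* bucket array and stored tuples are tracked separately */
		inst->space_peak_hash = Max(inst->space_peak_hash,
									hashtable->hashtab->size * sizeof(TupleHashEntryData));
		inst->space_peak_tuples = Max(inst->space_peak_tuples,
									  hashtable->tablecxt->mem_allocated);
	}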

I updated some existing tests to show the new output.  I imagine that's a
throwaway commit, and we should eventually add new tests for each of these node
types under explain analyze.

I've been testing the various nodes like so:

--heapscan:
DROP TABLE t; CREATE TABLE t (i int unique) WITH (autovacuum_enabled=off);
INSERT INTO t SELECT generate_series(1,99999);
SET enable_seqscan=off; SET parallel_tuple_cost=0;
SET parallel_setup_cost=0; SET enable_indexonlyscan=off;
explain analyze verbose SELECT * FROM t WHERE i BETWEEN 999 and 99999999;

--setop:
explain (analyze, verbose) SELECT * FROM generate_series(1,999)
EXCEPT (SELECT NULL UNION ALL SELECT * FROM generate_series(1,99999));
   Buckets: 2048 (originally 256)  Memory Usage: hashtable: 48kB, tuples: 8Kb

--recursive union:
explain analyze verbose WITH RECURSIVE t(n) AS (
  SELECT 'foo' UNION SELECT n || ' bar' FROM t WHERE length(n) < 9999)
SELECT n, n IS OF (text) AS is_text FROM t;

--subplan:
explain analyze verbose SELECT i FROM generate_series(1,999)i WHERE (i,i) NOT IN
(SELECT 1,1 UNION ALL SELECT j,j FROM generate_series(1,99999)j);
   Buckets: 262144 (originally 131072)  Memory Usage: hashtable: 6144kB, tuples: 782Kb
explain analyze verbose select i FROM generate_series(1,999)i WHERE (1,i) NOT in
(select i, null::int from t);

--Agg:
explain (analyze, verbose) SELECT a, COUNT(1) FROM generate_series(1,99999)a
GROUP BY 1;
   Buckets: 262144 (originally 256)  Memory Usage: hashtable: 6144kB, tuples: 782Kb

explain (analyze, verbose) select i FROM generate_series(1,999)i WHERE (1,1) not in
(select a, null from (SELECT generate_series(1,99999) a)x);

explain analyze verbose select * from (SELECT a FROM generate_series(1,99)a)v
left join lateral (select v.a, four, ten, count(*)
  from (SELECT b four, 2 ten, b FROM generate_series(1,999)b)x
  group by cube(four,ten)) s on true
order by v.a, four, ten;

--Grouping sets:
explain analyze verbose
  select unique1,
         count(two), count(four), count(ten),
         count(hundred), count(thousand), count(twothousand),
         count(*)
    from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);

-- 
Justin
From dff7109e4d82fd498ae8493caa0e4c84b0f04c74 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sat, 15 Feb 2020 12:03:11 -0600
Subject: [PATCH v2 1/7] Run some existing tests with explain (ANALYZE)..

..in a separate, earlier patch, to better show which bits are added by later
patches for hashtable instrumentation.
---
 src/test/regress/expected/aggregates.out      |  20 +-
 src/test/regress/expected/groupingsets.out    | 298 ++++++++++++++------------
 src/test/regress/expected/select_parallel.out |  20 +-
 src/test/regress/expected/subselect.out       |  69 ++++++
 src/test/regress/expected/union.out           |  71 +++---
 src/test/regress/sql/aggregates.sql           |   2 +-
 src/test/regress/sql/groupingsets.sql         |  44 ++--
 src/test/regress/sql/select_parallel.sql      |   4 +-
 src/test/regress/sql/subselect.sql            |  25 +++
 src/test/regress/sql/union.sql                |   6 +-
 10 files changed, 341 insertions(+), 218 deletions(-)

diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index f457b5b..b3dcbaa 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2342,18 +2342,20 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
 -- Make sure that generation of HashAggregate for uniqification purposes
 -- does not lead to array overflow due to unexpected duplicate hash keys
 -- see cafeejokku0u+a_a9r9316djw-yw3-+gtgvy3ju655qrhr3j...@mail.gmail.com
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select 1 from tenk1
    where (hundred, thousand) in (select twothousand, twothousand from onek);
-                         QUERY PLAN                          
--------------------------------------------------------------
- Hash Join
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Hash Join (actual rows=1000 loops=1)
    Hash Cond: (tenk1.hundred = onek.twothousand)
-   ->  Seq Scan on tenk1
+   ->  Seq Scan on tenk1 (actual rows=1000 loops=1)
          Filter: (hundred = thousand)
-   ->  Hash
-         ->  HashAggregate
+         Rows Removed by Filter: 9000
+   ->  Hash (actual rows=200 loops=1)
+         Buckets: 1024  Batches: 1  Memory Usage: 16kB
+         ->  HashAggregate (actual rows=200 loops=1)
                Group Key: onek.twothousand, onek.twothousand
-               ->  Seq Scan on onek
-(8 rows)
+               ->  Seq Scan on onek (actual rows=1000 loops=1)
+(10 rows)
 
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index c1f802c..7348f39 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -362,19 +362,20 @@ select a, d, grouping(a,b,c)
 
 -- check that distinct grouping columns are kept separate
 -- even if they are equal()
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select g as alias1, g as alias2
   from generate_series(1,3) g
  group by alias1, rollup(alias2);
-                   QUERY PLAN                   
-------------------------------------------------
- GroupAggregate
+                               QUERY PLAN                               
+------------------------------------------------------------------------
+ GroupAggregate (actual rows=6 loops=1)
    Group Key: g, g
    Group Key: g
-   ->  Sort
+   ->  Sort (actual rows=3 loops=1)
          Sort Key: g
-         ->  Function Scan on generate_series g
-(6 rows)
+         Sort Method: quicksort  Memory: 25kB
+         ->  Function Scan on generate_series g (actual rows=3 loops=1)
+(7 rows)
 
 select g as alias1, g as alias2
   from generate_series(1,3) g
@@ -458,16 +459,17 @@ ERROR:  aggregate functions are not allowed in FROM clause of their own query le
 LINE 3:        lateral (select a, b, sum(v.x) from gstest_data(v.x) ...
                                      ^
 -- min max optimization should still work with GROUP BY ()
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select min(unique1) from tenk1 GROUP BY ();
-                         QUERY PLAN                         
-------------------------------------------------------------
- Result
+                                     QUERY PLAN                                     
+------------------------------------------------------------------------------------
+ Result (actual rows=1 loops=1)
    InitPlan 1 (returns $0)
-     ->  Limit
-           ->  Index Only Scan using tenk1_unique1 on tenk1
+     ->  Limit (actual rows=1 loops=1)
+           ->  Index Only Scan using tenk1_unique1 on tenk1 (actual rows=1 loops=1)
                  Index Cond: (unique1 IS NOT NULL)
-(5 rows)
+                 Heap Fetches: 0
+(6 rows)
 
 -- Views with GROUPING SET queries
 CREATE VIEW gstest_view AS select a, b, grouping(a,b), sum(c), count(*), max(c)
@@ -638,17 +640,18 @@ select a, b, sum(v.x)
 (12 rows)
 
 -- Test reordering of grouping sets
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select * from gstest1 group by grouping sets((a,b,v),(v)) order by v,b,a;
                                   QUERY PLAN                                  
 ------------------------------------------------------------------------------
- GroupAggregate
+ GroupAggregate (actual rows=20 loops=1)
    Group Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1
    Group Key: "*VALUES*".column3
-   ->  Sort
+   ->  Sort (actual rows=10 loops=1)
          Sort Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1
-         ->  Values Scan on "*VALUES*"
-(6 rows)
+         Sort Method: quicksort  Memory: 25kB
+         ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
+(7 rows)
 
 -- Agg level check. This query should error out.
 select (select grouping(a,b) from gstest2) from gstest2 group by a,b;
@@ -718,18 +721,20 @@ select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 or
    |     9
 (2 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
-            QUERY PLAN            
-----------------------------------
- GroupAggregate
+                       QUERY PLAN                        
+---------------------------------------------------------
+ GroupAggregate (actual rows=2 loops=1)
    Group Key: a
    Group Key: ()
    Filter: (a IS DISTINCT FROM 1)
-   ->  Sort
+   Rows Removed by Filter: 1
+   ->  Sort (actual rows=9 loops=1)
          Sort Key: a
-         ->  Seq Scan on gstest2
-(7 rows)
+         Sort Method: quicksort  Memory: 25kB
+         ->  Seq Scan on gstest2 (actual rows=9 loops=1)
+(9 rows)
 
 select v.c, (select count(*) from gstest2 group by () having v.c)
   from (values (false),(true)) v(c) order by v.c;
@@ -739,22 +744,24 @@ select v.c, (select count(*) from gstest2 group by () having v.c)
  t |     9
 (2 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select v.c, (select count(*) from gstest2 group by () having v.c)
     from (values (false),(true)) v(c) order by v.c;
-                        QUERY PLAN                         
------------------------------------------------------------
- Sort
+                              QUERY PLAN                               
+-----------------------------------------------------------------------
+ Sort (actual rows=2 loops=1)
    Sort Key: "*VALUES*".column1
-   ->  Values Scan on "*VALUES*"
+   Sort Method: quicksort  Memory: 25kB
+   ->  Values Scan on "*VALUES*" (actual rows=2 loops=1)
          SubPlan 1
-           ->  Aggregate
+           ->  Aggregate (actual rows=0 loops=2)
                  Group Key: ()
                  Filter: "*VALUES*".column1
-                 ->  Result
+                 Rows Removed by Filter: 0
+                 ->  Result (actual rows=4 loops=2)
                        One-Time Filter: "*VALUES*".column1
-                       ->  Seq Scan on gstest2
-(10 rows)
+                       ->  Seq Scan on gstest2 (actual rows=9 loops=1)
+(12 rows)
 
 -- HAVING with GROUPING queries
 select ten, grouping(ten) from onek
@@ -966,17 +973,18 @@ select a, b, grouping(a,b), sum(v), count(*), max(v)
    | 4 |        2 |  17 |     1 |  17
 (8 rows)
 
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
+explain (costs off, timing off, summary off, analyze) select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
                                                QUERY PLAN                                               
 --------------------------------------------------------------------------------------------------------
- Sort
+ Sort (actual rows=8 loops=1)
    Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
-   ->  HashAggregate
+   Sort Method: quicksort  Memory: 25kB
+   ->  HashAggregate (actual rows=8 loops=1)
          Hash Key: "*VALUES*".column1
          Hash Key: "*VALUES*".column2
-         ->  Values Scan on "*VALUES*"
-(6 rows)
+         ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
+(7 rows)
 
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by cube(a,b) order by 3,1,2;
@@ -1000,36 +1008,38 @@ select a, b, grouping(a,b), sum(v), count(*), max(v)
    |   |        3 | 145 |    10 |  19
 (16 rows)
 
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
+explain (costs off, timing off, summary off, analyze) select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by cube(a,b) order by 3,1,2;
                                                QUERY PLAN                                               
 --------------------------------------------------------------------------------------------------------
- Sort
+ Sort (actual rows=16 loops=1)
    Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
-   ->  MixedAggregate
+   Sort Method: quicksort  Memory: 26kB
+   ->  MixedAggregate (actual rows=16 loops=1)
          Hash Key: "*VALUES*".column1, "*VALUES*".column2
          Hash Key: "*VALUES*".column1
          Hash Key: "*VALUES*".column2
          Group Key: ()
-         ->  Values Scan on "*VALUES*"
-(8 rows)
+         ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
+(9 rows)
 
 -- shouldn't try and hash
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, grouping(a,b), array_agg(v order by v)
     from gstest1 group by cube(a,b);
-                        QUERY PLAN                        
-----------------------------------------------------------
- GroupAggregate
+                           QUERY PLAN                           
+----------------------------------------------------------------
+ GroupAggregate (actual rows=16 loops=1)
    Group Key: "*VALUES*".column1, "*VALUES*".column2
    Group Key: "*VALUES*".column1
    Group Key: ()
    Sort Key: "*VALUES*".column2
      Group Key: "*VALUES*".column2
-   ->  Sort
+   ->  Sort (actual rows=10 loops=1)
          Sort Key: "*VALUES*".column1, "*VALUES*".column2
-         ->  Values Scan on "*VALUES*"
-(9 rows)
+         Sort Method: quicksort  Memory: 25kB
+         ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
+(10 rows)
 
 -- unsortable cases
 select unsortable_col, count(*)
@@ -1059,7 +1069,7 @@ select unhashable_col, unsortable_col,
                 |              1 |        2 |     4 | 195
 (6 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unhashable_col, unsortable_col,
          grouping(unhashable_col, unsortable_col),
          count(*), sum(v)
@@ -1067,15 +1077,17 @@ explain (costs off)
    order by 3,5;
                             QUERY PLAN                            
 ------------------------------------------------------------------
- Sort
+ Sort (actual rows=6 loops=1)
    Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v))
-   ->  MixedAggregate
+   Sort Method: quicksort  Memory: 25kB
+   ->  MixedAggregate (actual rows=6 loops=1)
          Hash Key: unsortable_col
          Group Key: unhashable_col
-         ->  Sort
+         ->  Sort (actual rows=8 loops=1)
                Sort Key: unhashable_col
-               ->  Seq Scan on gstest4
-(8 rows)
+               Sort Method: quicksort  Memory: 25kB
+               ->  Seq Scan on gstest4 (actual rows=8 loops=1)
+(10 rows)
 
 select unhashable_col, unsortable_col,
        grouping(unhashable_col, unsortable_col),
@@ -1102,7 +1114,7 @@ select unhashable_col, unsortable_col,
                 |              1 |        2 |     1 | 128
 (16 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unhashable_col, unsortable_col,
          grouping(unhashable_col, unsortable_col),
          count(*), sum(v)
@@ -1110,15 +1122,17 @@ explain (costs off)
    order by 3,5;
                             QUERY PLAN                            
 ------------------------------------------------------------------
- Sort
+ Sort (actual rows=16 loops=1)
    Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v))
-   ->  MixedAggregate
+   Sort Method: quicksort  Memory: 26kB
+   ->  MixedAggregate (actual rows=16 loops=1)
          Hash Key: v, unsortable_col
          Group Key: v, unhashable_col
-         ->  Sort
+         ->  Sort (actual rows=8 loops=1)
                Sort Key: v, unhashable_col
-               ->  Seq Scan on gstest4
-(8 rows)
+               Sort Method: quicksort  Memory: 25kB
+               ->  Seq Scan on gstest4 (actual rows=8 loops=1)
+(10 rows)
 
 -- empty input: first is 0 rows, second 1, third 3 etc.
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
@@ -1126,14 +1140,14 @@ select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a)
 ---+---+-----+-------
 (0 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
-           QUERY PLAN           
---------------------------------
- HashAggregate
+                       QUERY PLAN                       
+--------------------------------------------------------
+ HashAggregate (actual rows=0 loops=1)
    Hash Key: a, b
    Hash Key: a
-   ->  Seq Scan on gstest_empty
+   ->  Seq Scan on gstest_empty (actual rows=0 loops=1)
 (4 rows)
 
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),());
@@ -1150,16 +1164,16 @@ select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()
    |   |     |     0
 (3 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
-           QUERY PLAN           
---------------------------------
- MixedAggregate
+                       QUERY PLAN                       
+--------------------------------------------------------
+ MixedAggregate (actual rows=3 loops=1)
    Hash Key: a, b
    Group Key: ()
    Group Key: ()
    Group Key: ()
-   ->  Seq Scan on gstest_empty
+   ->  Seq Scan on gstest_empty (actual rows=0 loops=1)
 (6 rows)
 
 select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
@@ -1170,15 +1184,15 @@ select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
      |     0
 (3 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
-           QUERY PLAN           
---------------------------------
- Aggregate
+                       QUERY PLAN                       
+--------------------------------------------------------
+ Aggregate (actual rows=3 loops=1)
    Group Key: ()
    Group Key: ()
    Group Key: ()
-   ->  Seq Scan on gstest_empty
+   ->  Seq Scan on gstest_empty (actual rows=0 loops=1)
 (5 rows)
 
 -- check that functionally dependent cols are not nulled
@@ -1193,16 +1207,16 @@ select a, d, grouping(a,b,c)
  2 | 2 |        2
 (4 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, d, grouping(a,b,c)
     from gstest3
    group by grouping sets ((a,b), (a,c));
-        QUERY PLAN         
----------------------------
- HashAggregate
+                    QUERY PLAN                     
+---------------------------------------------------
+ HashAggregate (actual rows=4 loops=1)
    Hash Key: a, b
    Hash Key: a, c
-   ->  Seq Scan on gstest3
+   ->  Seq Scan on gstest3 (actual rows=2 loops=1)
 (4 rows)
 
 -- simple rescan tests
@@ -1219,22 +1233,23 @@ select a, b, sum(v.x)
    | 3 |   3
 (5 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v.x)
     from (values (1),(2)) v(x), gstest_data(v.x)
    group by grouping sets (a,b)
    order by 3, 1, 2;
-                             QUERY PLAN                              
----------------------------------------------------------------------
- Sort
+                               QUERY PLAN                               
+------------------------------------------------------------------------
+ Sort (actual rows=5 loops=1)
    Sort Key: (sum("*VALUES*".column1)), gstest_data.a, gstest_data.b
-   ->  HashAggregate
+   Sort Method: quicksort  Memory: 25kB
+   ->  HashAggregate (actual rows=5 loops=1)
          Hash Key: gstest_data.a
          Hash Key: gstest_data.b
-         ->  Nested Loop
-               ->  Values Scan on "*VALUES*"
-               ->  Function Scan on gstest_data
-(8 rows)
+         ->  Nested Loop (actual rows=6 loops=1)
+               ->  Values Scan on "*VALUES*" (actual rows=2 loops=1)
+               ->  Function Scan on gstest_data (actual rows=3 loops=2)
+(9 rows)
 
 select *
   from (values (1),(2)) v(x),
@@ -1242,7 +1257,7 @@ select *
 ERROR:  aggregate functions are not allowed in FROM clause of their own query level
 LINE 3:        lateral (select a, b, sum(v.x) from gstest_data(v.x) ...
                                      ^
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select *
     from (values (1),(2)) v(x),
          lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s;
@@ -1277,19 +1292,20 @@ select a, b, grouping(a,b), sum(v), count(*), max(v)
    |   |        3 |  37 |     2 |  19
 (21 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, grouping(a,b), sum(v), count(*), max(v)
     from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
                                         QUERY PLAN                                         
 -------------------------------------------------------------------------------------------
- Sort
+ Sort (actual rows=21 loops=1)
    Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), (max("*VALUES*".column3))
-   ->  HashAggregate
+   Sort Method: quicksort  Memory: 26kB
+   ->  HashAggregate (actual rows=21 loops=1)
          Hash Key: "*VALUES*".column1, "*VALUES*".column2
          Hash Key: ("*VALUES*".column1 + 1), ("*VALUES*".column2 + 1)
          Hash Key: ("*VALUES*".column1 + 2), ("*VALUES*".column2 + 2)
-         ->  Values Scan on "*VALUES*"
-(7 rows)
+         ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
+(8 rows)
 
 select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
   from gstest2 group by cube (a,b) order by rsum, a, b;
@@ -1305,23 +1321,25 @@ select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
    |   |  12 |   48
 (8 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
     from gstest2 group by cube (a,b) order by rsum, a, b;
-                 QUERY PLAN                  
----------------------------------------------
- Sort
+                             QUERY PLAN                              
+---------------------------------------------------------------------
+ Sort (actual rows=8 loops=1)
    Sort Key: (sum((sum(c))) OVER (?)), a, b
-   ->  WindowAgg
-         ->  Sort
+   Sort Method: quicksort  Memory: 25kB
+   ->  WindowAgg (actual rows=8 loops=1)
+         ->  Sort (actual rows=8 loops=1)
                Sort Key: a, b
-               ->  MixedAggregate
+               Sort Method: quicksort  Memory: 25kB
+               ->  MixedAggregate (actual rows=8 loops=1)
                      Hash Key: a, b
                      Hash Key: a
                      Hash Key: b
                      Group Key: ()
-                     ->  Seq Scan on gstest2
-(11 rows)
+                     ->  Seq Scan on gstest2 (actual rows=9 loops=1)
+(13 rows)
 
 select a, b, sum(v.x)
   from (values (1),(2)) v(x), gstest_data(v.x)
@@ -1342,23 +1360,24 @@ select a, b, sum(v.x)
    |   |   9
 (12 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v.x)
     from (values (1),(2)) v(x), gstest_data(v.x)
    group by cube (a,b) order by a,b;
-                   QUERY PLAN                   
-------------------------------------------------
- Sort
+                               QUERY PLAN                               
+------------------------------------------------------------------------
+ Sort (actual rows=12 loops=1)
    Sort Key: gstest_data.a, gstest_data.b
-   ->  MixedAggregate
+   Sort Method: quicksort  Memory: 25kB
+   ->  MixedAggregate (actual rows=12 loops=1)
          Hash Key: gstest_data.a, gstest_data.b
          Hash Key: gstest_data.a
          Hash Key: gstest_data.b
          Group Key: ()
-         ->  Nested Loop
-               ->  Values Scan on "*VALUES*"
-               ->  Function Scan on gstest_data
-(10 rows)
+         ->  Nested Loop (actual rows=6 loops=1)
+               ->  Values Scan on "*VALUES*" (actual rows=2 loops=1)
+               ->  Function Scan on gstest_data (actual rows=3 loops=2)
+(11 rows)
 
 -- Verify that we correctly handle the child node returning a
 -- non-minimal slot, which happens if the input is pre-sorted,
@@ -1543,15 +1562,15 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
 -- test the knapsack
 set enable_indexscan = false;
 set work_mem = '64kB';
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unique1,
          count(two), count(four), count(ten),
          count(hundred), count(thousand), count(twothousand),
          count(*)
     from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);
-          QUERY PLAN           
--------------------------------
- MixedAggregate
+                        QUERY PLAN                         
+-----------------------------------------------------------
+ MixedAggregate (actual rows=13116 loops=1)
    Hash Key: two
    Hash Key: four
    Hash Key: ten
@@ -1561,40 +1580,42 @@ explain (costs off)
      Group Key: twothousand
    Sort Key: thousand
      Group Key: thousand
-   ->  Sort
+   ->  Sort (actual rows=10000 loops=1)
          Sort Key: unique1
-         ->  Seq Scan on tenk1
-(13 rows)
+         Sort Method: external merge  Disk: 392kB
+         ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
+(14 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unique1,
          count(two), count(four), count(ten),
          count(hundred), count(thousand), count(twothousand),
          count(*)
     from tenk1 group by grouping sets (unique1,hundred,ten,four,two);
-          QUERY PLAN           
--------------------------------
- MixedAggregate
+                        QUERY PLAN                         
+-----------------------------------------------------------
+ MixedAggregate (actual rows=10116 loops=1)
    Hash Key: two
    Hash Key: four
    Hash Key: ten
    Hash Key: hundred
    Group Key: unique1
-   ->  Sort
+   ->  Sort (actual rows=10000 loops=1)
          Sort Key: unique1
-         ->  Seq Scan on tenk1
-(9 rows)
+         Sort Method: external merge  Disk: 392kB
+         ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
+(10 rows)
 
 set work_mem = '384kB';
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unique1,
          count(two), count(four), count(ten),
          count(hundred), count(thousand), count(twothousand),
          count(*)
     from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);
-          QUERY PLAN           
--------------------------------
- MixedAggregate
+                        QUERY PLAN                         
+-----------------------------------------------------------
+ MixedAggregate (actual rows=13116 loops=1)
    Hash Key: two
    Hash Key: four
    Hash Key: ten
@@ -1603,10 +1624,11 @@ explain (costs off)
    Group Key: unique1
    Sort Key: twothousand
      Group Key: twothousand
-   ->  Sort
+   ->  Sort (actual rows=10000 loops=1)
          Sort Key: unique1
-         ->  Seq Scan on tenk1
-(12 rows)
+         Sort Method: external merge  Disk: 392kB
+         ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
+(13 rows)
 
 -- check collation-sensitive matching between grouping expressions
 -- (similar to a check for aggregates, but there are additional code
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 96dfb7c..94cf969 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -290,21 +290,23 @@ execute tenk1_count(1);
 deallocate tenk1_count;
 -- test parallel plans for queries containing un-correlated subplans.
 alter table tenk2 set (parallel_workers = 0);
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 	select count(*) from tenk1 where (two, four) not in
 	(select hundred, thousand from tenk2 where thousand > 100);
-                      QUERY PLAN                      
-------------------------------------------------------
- Finalize Aggregate
-   ->  Gather
+                               QUERY PLAN                                
+-------------------------------------------------------------------------
+ Finalize Aggregate (actual rows=1 loops=1)
+   ->  Gather (actual rows=5 loops=1)
          Workers Planned: 4
-         ->  Partial Aggregate
-               ->  Parallel Seq Scan on tenk1
+         Workers Launched: 4
+         ->  Partial Aggregate (actual rows=1 loops=5)
+               ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
                      Filter: (NOT (hashed SubPlan 1))
                      SubPlan 1
-                       ->  Seq Scan on tenk2
+                       ->  Seq Scan on tenk2 (actual rows=8990 loops=5)
                              Filter: (thousand > 100)
-(9 rows)
+                             Rows Removed by Filter: 1010
+(11 rows)
 
 select count(*) from tenk1 where (two, four) not in
 	(select hundred, thousand from tenk2 where thousand > 100);
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index 71a677b..55991c8 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -782,6 +782,17 @@ select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
                  Output: 'bar'::name
 (8 rows)
 
+explain (analyze, timing off, summary off, costs off)
+select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
+                  QUERY PLAN                  
+----------------------------------------------
+ Result (actual rows=1 loops=1)
+   SubPlan 1
+     ->  Append (actual rows=2 loops=1)
+           ->  Result (actual rows=1 loops=1)
+           ->  Result (actual rows=1 loops=1)
+(5 rows)
+
 select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
  ?column? 
 ----------
@@ -974,6 +985,22 @@ select * from int4_tbl where
            Output: a.unique1
 (10 rows)
 
+explain (analyze, timing off, summary off, costs off)
+select * from int4_tbl where
+  (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
+  (select ten from tenk1 b);
+                                          QUERY PLAN                                           
+-----------------------------------------------------------------------------------------------
+ Nested Loop Semi Join (actual rows=1 loops=1)
+   Join Filter: (CASE WHEN (hashed SubPlan 1) THEN int4_tbl.f1 ELSE NULL::integer END = b.ten)
+   Rows Removed by Join Filter: 40000
+   ->  Seq Scan on int4_tbl (actual rows=5 loops=1)
+   ->  Seq Scan on tenk1 b (actual rows=8000 loops=5)
+   SubPlan 1
+     ->  Index Only Scan using tenk1_unique1 on tenk1 a (actual rows=10000 loops=1)
+           Heap Fetches: 0
+(8 rows)
+
 select * from int4_tbl where
   (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
   (select ten from tenk1 b);
@@ -1377,6 +1404,29 @@ select * from x;
                        Output: z1.a
 (16 rows)
 
+explain (analyze, timing off, summary off, costs off)
+with recursive x(a) as
+  ((values ('a'), ('b'))
+   union all
+   (with z as not materialized (select * from x)
+    select z.a || z1.a as a from z cross join z as z1
+    where length(z.a || z1.a) < 5))
+select * from x;
+                              QUERY PLAN                               
+-----------------------------------------------------------------------
+ CTE Scan on x (actual rows=22 loops=1)
+   CTE x
+     ->  Recursive Union (actual rows=22 loops=1)
+           ->  Values Scan on "*VALUES*" (actual rows=2 loops=1)
+           ->  Nested Loop (actual rows=7 loops=3)
+                 Join Filter: (length((z.a || z1.a)) < 5)
+                 Rows Removed by Join Filter: 85
+                 CTE z
+                   ->  WorkTable Scan on x x_1 (actual rows=7 loops=3)
+                 ->  CTE Scan on z (actual rows=7 loops=3)
+                 ->  CTE Scan on z z1 (actual rows=13 loops=22)
+(11 rows)
+
 with recursive x(a) as
   ((values ('a'), ('b'))
    union all
@@ -1431,6 +1481,25 @@ select * from x;
                  Filter: (length((x_1.a || x_1.a)) < 5)
 (9 rows)
 
+explain (analyze, timing off, summary off, costs off)
+with recursive x(a) as
+  ((values ('a'), ('b'))
+   union all
+   (with z as not materialized (select * from x)
+    select z.a || z.a as a from z
+    where length(z.a || z.a) < 5))
+select * from x;
+                           QUERY PLAN                            
+-----------------------------------------------------------------
+ CTE Scan on x (actual rows=6 loops=1)
+   CTE x
+     ->  Recursive Union (actual rows=6 loops=1)
+           ->  Values Scan on "*VALUES*" (actual rows=2 loops=1)
+           ->  WorkTable Scan on x x_1 (actual rows=1 loops=3)
+                 Filter: (length((a || a)) < 5)
+                 Rows Removed by Filter: 1
+(7 rows)
+
 with recursive x(a) as
   ((values ('a'), ('b'))
    union all
diff --git a/src/test/regress/expected/union.out b/src/test/regress/expected/union.out
index 6e72e92..dcd51a7 100644
--- a/src/test/regress/expected/union.out
+++ b/src/test/regress/expected/union.out
@@ -347,20 +347,21 @@ ERROR:  FOR NO KEY UPDATE is not allowed with UNION/INTERSECT/EXCEPT
 
 -- exercise both hashed and sorted implementations of INTERSECT/EXCEPT
 set enable_hashagg to on;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
-                                     QUERY PLAN                                     
-------------------------------------------------------------------------------------
- Aggregate
-   ->  Subquery Scan on ss
-         ->  HashSetOp Intersect
-               ->  Append
-                     ->  Subquery Scan on "*SELECT* 2"
-                           ->  Seq Scan on tenk1
-                     ->  Subquery Scan on "*SELECT* 1"
-                           ->  Index Only Scan using tenk1_unique1 on tenk1 tenk1_1
-(8 rows)
+                                                   QUERY PLAN                                                   
+----------------------------------------------------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Subquery Scan on ss (actual rows=5000 loops=1)
+         ->  HashSetOp Intersect (actual rows=5000 loops=1)
+               ->  Append (actual rows=20000 loops=1)
+                     ->  Subquery Scan on "*SELECT* 2" (actual rows=10000 loops=1)
+                           ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
+                     ->  Subquery Scan on "*SELECT* 1" (actual rows=10000 loops=1)
+                           ->  Index Only Scan using tenk1_unique1 on tenk1 tenk1_1 (actual rows=10000 loops=1)
+                                 Heap Fetches: 0
+(9 rows)
 
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
@@ -389,22 +390,24 @@ select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
 (1 row)
 
 set enable_hashagg to off;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
-                                        QUERY PLAN                                        
-------------------------------------------------------------------------------------------
- Aggregate
-   ->  Subquery Scan on ss
-         ->  SetOp Intersect
-               ->  Sort
+                                                      QUERY PLAN                                                      
+----------------------------------------------------------------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Subquery Scan on ss (actual rows=5000 loops=1)
+         ->  SetOp Intersect (actual rows=5000 loops=1)
+               ->  Sort (actual rows=20000 loops=1)
                      Sort Key: "*SELECT* 2".fivethous
-                     ->  Append
-                           ->  Subquery Scan on "*SELECT* 2"
-                                 ->  Seq Scan on tenk1
-                           ->  Subquery Scan on "*SELECT* 1"
-                                 ->  Index Only Scan using tenk1_unique1 on tenk1 tenk1_1
-(10 rows)
+                     Sort Method: quicksort  Memory: 1862kB
+                     ->  Append (actual rows=20000 loops=1)
+                           ->  Subquery Scan on "*SELECT* 2" (actual rows=10000 loops=1)
+                                 ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
+                           ->  Subquery Scan on "*SELECT* 1" (actual rows=10000 loops=1)
+                                 ->  Index Only Scan using tenk1_unique1 on tenk1 tenk1_1 (actual rows=10000 loops=1)
+                                       Heap Fetches: 0
+(12 rows)
 
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
@@ -580,16 +583,16 @@ select from generate_series(1,5) union select from generate_series(1,3);
          ->  Function Scan on generate_series generate_series_1
 (4 rows)
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select from generate_series(1,5) intersect select from generate_series(1,3);
-                              QUERY PLAN                              
-----------------------------------------------------------------------
- HashSetOp Intersect
-   ->  Append
-         ->  Subquery Scan on "*SELECT* 1"
-               ->  Function Scan on generate_series
-         ->  Subquery Scan on "*SELECT* 2"
-               ->  Function Scan on generate_series generate_series_1
+                                          QUERY PLAN                                          
+----------------------------------------------------------------------------------------------
+ HashSetOp Intersect (actual rows=1 loops=1)
+   ->  Append (actual rows=8 loops=1)
+         ->  Subquery Scan on "*SELECT* 1" (actual rows=5 loops=1)
+               ->  Function Scan on generate_series (actual rows=5 loops=1)
+         ->  Subquery Scan on "*SELECT* 2" (actual rows=3 loops=1)
+               ->  Function Scan on generate_series generate_series_1 (actual rows=3 loops=1)
 (6 rows)
 
 select from generate_series(1,5) union select from generate_series(1,3);
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 3e593f2..3d37754 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -1029,6 +1029,6 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
 -- Make sure that generation of HashAggregate for uniqification purposes
 -- does not lead to array overflow due to unexpected duplicate hash keys
 -- see cafeejokku0u+a_a9r9316djw-yw3-+gtgvy3ju655qrhr3j...@mail.gmail.com
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select 1 from tenk1
    where (hundred, thousand) in (select twothousand, twothousand from onek);
diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql
index 95ac3fb..1f18365 100644
--- a/src/test/regress/sql/groupingsets.sql
+++ b/src/test/regress/sql/groupingsets.sql
@@ -143,7 +143,7 @@ select a, d, grouping(a,b,c)
 
 -- check that distinct grouping columns are kept separate
 -- even if they are equal()
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select g as alias1, g as alias2
   from generate_series(1,3) g
  group by alias1, rollup(alias2);
@@ -183,7 +183,7 @@ select *
        lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s;
 
 -- min max optimization should still work with GROUP BY ()
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select min(unique1) from tenk1 GROUP BY ();
 
 -- Views with GROUPING SET queries
@@ -214,7 +214,7 @@ select a, b, sum(v.x)
  group by cube (a,b) order by a,b;
 
 -- Test reordering of grouping sets
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select * from gstest1 group by grouping sets((a,b,v),(v)) order by v,b,a;
 
 -- Agg level check. This query should error out.
@@ -231,12 +231,12 @@ having exists (select 1 from onek b where sum(distinct a.four) = b.four);
 -- Tests around pushdown of HAVING clauses, partially testing against previous bugs
 select a,count(*) from gstest2 group by rollup(a) order by a;
 select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a;
 
 select v.c, (select count(*) from gstest2 group by () having v.c)
   from (values (false),(true)) v(c) order by v.c;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select v.c, (select count(*) from gstest2 group by () having v.c)
     from (values (false),(true)) v(c) order by v.c;
 
@@ -282,16 +282,16 @@ select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortab
 
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
+explain (costs off, timing off, summary off, analyze) select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
 
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by cube(a,b) order by 3,1,2;
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
+explain (costs off, timing off, summary off, analyze) select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by cube(a,b) order by 3,1,2;
 
 -- shouldn't try and hash
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, grouping(a,b), array_agg(v order by v)
     from gstest1 group by cube(a,b);
 
@@ -306,7 +306,7 @@ select unhashable_col, unsortable_col,
        count(*), sum(v)
   from gstest4 group by grouping sets ((unhashable_col),(unsortable_col))
  order by 3, 5;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unhashable_col, unsortable_col,
          grouping(unhashable_col, unsortable_col),
          count(*), sum(v)
@@ -318,7 +318,7 @@ select unhashable_col, unsortable_col,
        count(*), sum(v)
   from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col))
  order by 3,5;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unhashable_col, unsortable_col,
          grouping(unhashable_col, unsortable_col),
          count(*), sum(v)
@@ -327,21 +327,21 @@ explain (costs off)
 
 -- empty input: first is 0 rows, second 1, third 3 etc.
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),());
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
 select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
 
 -- check that functionally dependent cols are not nulled
 select a, d, grouping(a,b,c)
   from gstest3
  group by grouping sets ((a,b), (a,c));
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, d, grouping(a,b,c)
     from gstest3
    group by grouping sets ((a,b), (a,c));
@@ -352,7 +352,7 @@ select a, b, sum(v.x)
   from (values (1),(2)) v(x), gstest_data(v.x)
  group by grouping sets (a,b)
  order by 1, 2, 3;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v.x)
     from (values (1),(2)) v(x), gstest_data(v.x)
    group by grouping sets (a,b)
@@ -360,7 +360,7 @@ explain (costs off)
 select *
   from (values (1),(2)) v(x),
        lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select *
     from (values (1),(2)) v(x),
          lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s;
@@ -368,18 +368,18 @@ explain (costs off)
 -- Tests for chained aggregates
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, grouping(a,b), sum(v), count(*), max(v)
     from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
 select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
   from gstest2 group by cube (a,b) order by rsum, a, b;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
     from gstest2 group by cube (a,b) order by rsum, a, b;
 select a, b, sum(v.x)
   from (values (1),(2)) v(x), gstest_data(v.x)
  group by cube (a,b) order by a,b;
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select a, b, sum(v.x)
     from (values (1),(2)) v(x), gstest_data(v.x)
    group by cube (a,b) order by a,b;
@@ -409,13 +409,13 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
 
 set enable_indexscan = false;
 set work_mem = '64kB';
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unique1,
          count(two), count(four), count(ten),
          count(hundred), count(thousand), count(twothousand),
          count(*)
     from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unique1,
          count(two), count(four), count(ten),
          count(hundred), count(thousand), count(twothousand),
@@ -423,7 +423,7 @@ explain (costs off)
     from tenk1 group by grouping sets (unique1,hundred,ten,four,two);
 
 set work_mem = '384kB';
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
   select unique1,
          count(two), count(four), count(ten),
          count(hundred), count(thousand), count(twothousand),
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 11e7735..49d44e2 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -108,13 +108,13 @@ deallocate tenk1_count;
 
 -- test parallel plans for queries containing un-correlated subplans.
 alter table tenk2 set (parallel_workers = 0);
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 	select count(*) from tenk1 where (two, four) not in
 	(select hundred, thousand from tenk2 where thousand > 100);
 select count(*) from tenk1 where (two, four) not in
 	(select hundred, thousand from tenk2 where thousand > 100);
 -- this is not parallel-safe due to use of random() within SubLink's testexpr:
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 	select * from tenk1 where (unique1 + random())::integer not in
 	(select ten from tenk2);
 alter table tenk2 reset (parallel_workers);
diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql
index bd8d2f6..b7e7734 100644
--- a/src/test/regress/sql/subselect.sql
+++ b/src/test/regress/sql/subselect.sql
@@ -460,6 +460,9 @@ select * from outer_text where (f1, f2) not in (select * from inner_text);
 explain (verbose, costs off)
 select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
 
+explain (analyze, timing off, summary off, costs off)
+select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
+
 select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
 
 --
@@ -539,6 +542,10 @@ explain (verbose, costs off)
 select * from int4_tbl where
   (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
   (select ten from tenk1 b);
+explain (analyze, timing off, summary off, costs off)
+select * from int4_tbl where
+  (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
+  (select ten from tenk1 b);
 select * from int4_tbl where
   (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
   (select ten from tenk1 b);
@@ -722,6 +729,15 @@ with recursive x(a) as
     where length(z.a || z1.a) < 5))
 select * from x;
 
+explain (analyze, timing off, summary off, costs off)
+with recursive x(a) as
+  ((values ('a'), ('b'))
+   union all
+   (with z as not materialized (select * from x)
+    select z.a || z1.a as a from z cross join z as z1
+    where length(z.a || z1.a) < 5))
+select * from x;
+
 with recursive x(a) as
   ((values ('a'), ('b'))
    union all
@@ -739,6 +755,15 @@ with recursive x(a) as
     where length(z.a || z.a) < 5))
 select * from x;
 
+explain (analyze, timing off, summary off, costs off)
+with recursive x(a) as
+  ((values ('a'), ('b'))
+   union all
+   (with z as not materialized (select * from x)
+    select z.a || z.a as a from z
+    where length(z.a || z.a) < 5))
+select * from x;
+
 with recursive x(a) as
   ((values ('a'), ('b'))
    union all
diff --git a/src/test/regress/sql/union.sql b/src/test/regress/sql/union.sql
index 5f4881d..7fbe801 100644
--- a/src/test/regress/sql/union.sql
+++ b/src/test/regress/sql/union.sql
@@ -122,7 +122,7 @@ SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl FOR NO KEY UPDATE;
 
 set enable_hashagg to on;
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
 select count(*) from
@@ -134,7 +134,7 @@ select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
 
 set enable_hashagg to off;
 
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
 select count(*) from
@@ -204,7 +204,7 @@ set enable_sort = false;
 
 explain (costs off)
 select from generate_series(1,5) union select from generate_series(1,3);
-explain (costs off)
+explain (costs off, timing off, summary off, analyze)
 select from generate_series(1,5) intersect select from generate_series(1,3);
 
 select from generate_series(1,5) union select from generate_series(1,3);
-- 
2.7.4

From f35f10afc8349eb642ea300782105de097afd5c9 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Tue, 31 Dec 2019 18:49:41 -0600
Subject: [PATCH v2 2/7] explain to show tuplehash bucket and memory stats..

Discussion: https://www.postgresql.org/message-id/flat/20200103161925.gm12...@telsasoft.com
---
 src/backend/commands/explain.c                | 131 +++++++++++++++++++++++---
 src/backend/executor/execGrouping.c           |  27 ++++++
 src/backend/executor/nodeAgg.c                |   2 +
 src/backend/executor/nodeRecursiveunion.c     |   3 +
 src/backend/executor/nodeSetOp.c              |   1 +
 src/backend/executor/nodeSubplan.c            |   3 +
 src/include/executor/executor.h               |   1 +
 src/include/nodes/execnodes.h                 |  10 ++
 src/test/regress/expected/aggregates.out      |   3 +-
 src/test/regress/expected/groupingsets.out    |  64 ++++++++++---
 src/test/regress/expected/select_parallel.out |   4 +-
 src/test/regress/expected/subselect.out       |   8 +-
 src/test/regress/expected/union.out           |   6 +-
 src/test/regress/sql/select_parallel.sql      |   2 +-
 14 files changed, 232 insertions(+), 33 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index d901dc4..e262108 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -18,6 +18,7 @@
 #include "commands/createas.h"
 #include "commands/defrem.h"
 #include "commands/prepare.h"
+#include "executor/nodeAgg.h"
 #include "executor/nodeHash.h"
 #include "foreign/fdwapi.h"
 #include "jit/jit.h"
@@ -86,12 +87,13 @@ static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
 								   ExplainState *es);
 static void show_agg_keys(AggState *astate, List *ancestors,
 						  ExplainState *es);
-static void show_grouping_sets(PlanState *planstate, Agg *agg,
+static void show_grouping_sets(AggState *planstate, Agg *agg,
 							   List *ancestors, ExplainState *es);
-static void show_grouping_set_keys(PlanState *planstate,
+static void show_grouping_set_keys(AggState *aggstate,
 								   Agg *aggnode, Sort *sortnode,
 								   List *context, bool useprefix,
-								   List *ancestors, ExplainState *es);
+								   List *ancestors, ExplainState *es,
+								   hash_instrumentation *inst);
 static void show_group_keys(GroupState *gstate, List *ancestors,
 							ExplainState *es);
 static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
@@ -104,6 +106,7 @@ static void show_tablesample(TableSampleClause *tsc, PlanState *planstate,
 							 List *ancestors, ExplainState *es);
 static void show_sort_info(SortState *sortstate, ExplainState *es);
 static void show_hash_info(HashState *hashstate, ExplainState *es);
+static void show_tuplehash_info(hash_instrumentation *inst, ExplainState *es);
 static void show_tidbitmap_info(BitmapHeapScanState *planstate,
 								ExplainState *es);
 static void show_instrumentation_count(const char *qlabel, int which,
@@ -1489,6 +1492,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 					appendStringInfo(es->str, " %s", setopcmd);
 				else
 					ExplainPropertyText("Command", setopcmd, es);
+				// show strategy in text mode ?
 			}
 			break;
 		default:
@@ -1886,6 +1890,20 @@ ExplainNode(PlanState *planstate, List *ancestors,
 				show_instrumentation_count("Rows Removed by Filter", 1,
 										   planstate, es);
 			break;
+		case T_SetOp:
+			{
+				SetOpState *sos = castNode(SetOpState, planstate);
+				if (sos->hashtable)
+					show_tuplehash_info(&sos->hashtable->instrument, es);
+			}
+			break;
+		case T_RecursiveUnion:
+			{
+				RecursiveUnionState *rus = (RecursiveUnionState *)planstate;
+				if (rus->hashtable)
+					show_tuplehash_info(&rus->hashtable->instrument, es);
+				break;
+			}
 		case T_Group:
 			show_group_keys(castNode(GroupState, planstate), ancestors, es);
 			show_upper_qual(plan->qual, "Filter", planstate, ancestors, es);
@@ -2262,21 +2280,26 @@ show_agg_keys(AggState *astate, List *ancestors,
 		ancestors = lcons(plan, ancestors);
 
 		if (plan->groupingSets)
-			show_grouping_sets(outerPlanState(astate), plan, ancestors, es);
-		else
+			show_grouping_sets(astate, plan, ancestors, es);
+		else {
 			show_sort_group_keys(outerPlanState(astate), "Group Key",
 								 plan->numCols, plan->grpColIdx,
 								 NULL, NULL, NULL,
 								 ancestors, es);
+			Assert(astate->num_hashes <= 1);
+			if (astate->num_hashes)
+				show_tuplehash_info(&astate->perhash[0].hashtable->instrument, es);
+		}
 
 		ancestors = list_delete_first(ancestors);
 	}
 }
 
 static void
-show_grouping_sets(PlanState *planstate, Agg *agg,
+show_grouping_sets(AggState *aggstate, Agg *agg,
 				   List *ancestors, ExplainState *es)
 {
+	PlanState	*planstate = outerPlanState(aggstate);
 	List	   *context;
 	bool		useprefix;
 	ListCell   *lc;
@@ -2289,27 +2312,43 @@ show_grouping_sets(PlanState *planstate, Agg *agg,
 
 	ExplainOpenGroup("Grouping Sets", "Grouping Sets", false, es);
 
-	show_grouping_set_keys(planstate, agg, NULL,
-						   context, useprefix, ancestors, es);
+	/* XXX: will this show things twice? */
+	show_grouping_set_keys(aggstate, agg, NULL,
+						   context, useprefix, ancestors, es,
+						   aggstate->num_hashes ? &aggstate->perhash[0].hashtable->instrument : NULL);
 
 	foreach(lc, agg->chain)
 	{
 		Agg		   *aggnode = lfirst(lc);
 		Sort	   *sortnode = (Sort *) aggnode->plan.lefttree;
+		hash_instrumentation *inst;
 
-		show_grouping_set_keys(planstate, aggnode, sortnode,
-							   context, useprefix, ancestors, es);
+		if (aggnode->aggstrategy == AGG_HASHED ||
+				aggnode->aggstrategy == AGG_MIXED) {
+			int	nth = list_cell_number(agg->chain, lc);
+			Assert(nth < aggstate->num_hashes);
+			inst = &aggstate->perhash[nth].hashtable->instrument;
+		}
+		else
+			inst = NULL;
+
+		show_grouping_set_keys(aggstate, aggnode, sortnode,
+							   context, useprefix, ancestors, es,
+							   inst);
 	}
 
 	ExplainCloseGroup("Grouping Sets", "Grouping Sets", false, es);
 }
 
 static void
-show_grouping_set_keys(PlanState *planstate,
+show_grouping_set_keys(AggState *aggstate,
 					   Agg *aggnode, Sort *sortnode,
 					   List *context, bool useprefix,
-					   List *ancestors, ExplainState *es)
+					   List *ancestors, ExplainState *es,
+					   hash_instrumentation *inst)
+
 {
+	PlanState	*planstate = outerPlanState(aggstate);
 	Plan	   *plan = planstate->plan;
 	char	   *exprstr;
 	ListCell   *lc;
@@ -2369,6 +2408,10 @@ show_grouping_set_keys(PlanState *planstate,
 			ExplainPropertyText(keyname, "()", es);
 		else
 			ExplainPropertyListNested(keyname, result, es);
+
+		if (aggnode->aggstrategy == AGG_HASHED ||
+				aggnode->aggstrategy == AGG_MIXED)
+			show_tuplehash_info(inst, es);
 	}
 
 	ExplainCloseGroup(keysetname, keysetname, false, es);
@@ -2770,6 +2813,57 @@ show_hash_info(HashState *hashstate, ExplainState *es)
 }
 
 /*
+ * Show hash bucket stats and (optionally) memory.
+ */
+
+static void
+show_tuplehash_info(hash_instrumentation *inst, ExplainState *es)
+{
+	long	spacePeakKb_tuples = (inst->space_peak_tuples + 1023) / 1024,
+		spacePeakKb_hash = (inst->space_peak_hash + 1023) / 1024;
+
+	if (!es->analyze)
+		return;
+
+	if (es->format != EXPLAIN_FORMAT_TEXT)
+	{
+		ExplainPropertyInteger("Hash Buckets", NULL,
+							   inst->nbuckets, es);
+		ExplainPropertyInteger("Original Hash Buckets", NULL,
+							   inst->nbuckets_original, es);
+		ExplainPropertyInteger("Peak Memory Usage (hashtable)", "kB",
+							   spacePeakKb_hash, es);
+		ExplainPropertyInteger("Peak Memory Usage (tuples)", "kB",
+							   spacePeakKb_tuples, es);
+	}
+	else if (!inst->nbuckets)
+		; /* Do nothing */
+	else
+	{
+		if (inst->nbuckets_original != inst->nbuckets) {
+			ExplainIndentText(es);
+			appendStringInfo(es->str,
+						"Buckets: %ld (originally %ld)",
+						inst->nbuckets,
+						inst->nbuckets_original);
+		}
+		else
+		{
+			ExplainIndentText(es);
+			appendStringInfo(es->str,
+						"Buckets: %ld",
+						inst->nbuckets);
+		}
+
+		if (es->verbose)
+			appendStringInfo(es->str,
+					"  Memory Usage: hashtable: %ldkB, tuples: %ldkB",
+					spacePeakKb_hash, spacePeakKb_tuples);
+		appendStringInfoChar(es->str, '\n');
+	}
+}
+
+/*
  * If it's EXPLAIN ANALYZE, show exact/lossy pages for a BitmapHeapScan node
  */
 static void
@@ -3436,6 +3532,17 @@ ExplainSubPlans(List *plans, List *ancestors,
 
 		ExplainNode(sps->planstate, ancestors,
 					relationship, sp->plan_name, es);
+		if (sps->hashtable)
+			show_tuplehash_info(&sps->hashtable->instrument, es);
+		if (sps->hashnulls) {
+			ExplainOpenGroup("Null hashtable", "Null hashtable", true, es);
+			if (es->format == EXPLAIN_FORMAT_TEXT) {
+				ExplainIndentText(es);
+				appendStringInfoString(es->str, "Null hashtable: ");
+			}
+			show_tuplehash_info(&sps->hashnulls->instrument, es);
+			ExplainCloseGroup("Null hashtable", "Null hashtable", true, es);
+		}
 
 		ancestors = list_delete_first(ancestors);
 	}
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index de0205f..5641b3f 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -191,6 +191,7 @@ BuildTupleHashTableExt(PlanState *parent,
 	hashtable->inputslot = NULL;
 	hashtable->in_hash_funcs = NULL;
 	hashtable->cur_eq_func = NULL;
+	memset(&hashtable->instrument, 0, sizeof(hashtable->instrument));
 
 	/*
 	 * If parallelism is in use, even if the master backend is performing the
@@ -206,6 +207,7 @@ BuildTupleHashTableExt(PlanState *parent,
 		hashtable->hash_iv = 0;
 
 	hashtable->hashtab = tuplehash_create(metacxt, nbuckets, hashtable);
+	UpdateTupleHashTableStats(hashtable, true);
 
 	/*
 	 * We copy the input tuple descriptor just for safety --- we assume all
@@ -284,9 +286,34 @@ BuildTupleHashTable(PlanState *parent,
 void
 ResetTupleHashTable(TupleHashTable hashtable)
 {
+	UpdateTupleHashTableStats(hashtable, false);
 	tuplehash_reset(hashtable->hashtab);
 }
 
+/* Update instrumentation stats */
+void
+UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial)
+{
+	hashtable->instrument.nbuckets = hashtable->hashtab->size;
+	if (initial) {
+		hashtable->instrument.nbuckets_original = hashtable->hashtab->size;
+		hashtable->instrument.space_peak_hash = hashtable->hashtab->size * sizeof(TupleHashEntryData);
+		hashtable->instrument.space_peak_tuples = 0;
+	}
+	else
+	{
+#define maxself(a,b) a=Max(a,b)
+		/* hashtable->entrysize includes additionalsize */
+		maxself(hashtable->instrument.space_peak_hash,
+				hashtable->hashtab->size * sizeof(TupleHashEntryData) +
+				hashtable->hashtab->members * (hashtable->entrysize - sizeof(TupleHashEntryData)));
+
+		maxself(hashtable->instrument.space_peak_tuples,
+				hashtable->hashtab->members * sizeof(MinimalTuple));
+#undef maxself
+	}
+}
+
 /*
  * Find or create a hashtable entry for the tuple group containing the
  * given tuple.  The tuple must be the same type as the hashtable entries.
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index b7f49ce..170dfc7 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1666,6 +1666,7 @@ agg_retrieve_direct(AggState *aggstate)
 				 */
 				initialize_phase(aggstate, 0);
 				aggstate->table_filled = true;
+				UpdateTupleHashTableStats(aggstate->perhash[0].hashtable, false);
 				ResetTupleHashIterator(aggstate->perhash[0].hashtable,
 									   &aggstate->perhash[0].hashiter);
 				select_current_set(aggstate, 0, true);
@@ -1937,6 +1938,7 @@ agg_fill_hash_table(AggState *aggstate)
 	}
 
 	aggstate->table_filled = true;
+	UpdateTupleHashTableStats(aggstate->perhash[aggstate->current_set].hashtable, false);
 	/* Initialize to walk the first hash table */
 	select_current_set(aggstate, 0, true);
 	ResetTupleHashIterator(aggstate->perhash[0].hashtable,
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 620414a..93272c2 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -156,6 +156,9 @@ ExecRecursiveUnion(PlanState *pstate)
 		return slot;
 	}
 
+	if (node->hashtable)
+		UpdateTupleHashTableStats(node->hashtable, false);
+
 	return NULL;
 }
 
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index bfd148a..9c0e0ab 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -415,6 +415,7 @@ setop_fill_hash_table(SetOpState *setopstate)
 
 	setopstate->table_filled = true;
 	/* Initialize to walk the hash table */
+	UpdateTupleHashTableStats(setopstate->hashtable, false);
 	ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
 }
 
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index ff95317..eec849c 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -621,6 +621,9 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	ExecClearTuple(node->projRight->pi_state.resultslot);
 
 	MemoryContextSwitchTo(oldcontext);
+	UpdateTupleHashTableStats(node->hashtable, false);
+	if (node->hashnulls)
+		UpdateTupleHashTableStats(node->hashnulls, false);
 }
 
 /*
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 81fdfa4..34199b5 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -150,6 +150,7 @@ extern TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable,
 										 ExprState *eqcomp,
 										 FmgrInfo *hashfunctions);
 extern void ResetTupleHashTable(TupleHashTable hashtable);
+extern void UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial);
 
 /*
  * prototypes from functions in execJunk.c
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 5d5b38b..f929585 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -688,6 +688,15 @@ typedef struct TupleHashEntryData
 #define SH_DECLARE
 #include "lib/simplehash.h"
 
+/* XXX: not to be confused with struct HashInstrumentation... */
+typedef struct hash_instrumentation
+{
+	size_t	nbuckets;				/* number of buckets at end of execution */
+	size_t	nbuckets_original;		/* planned number of buckets */
+	size_t	space_peak_hash;	/* peak hash table memory, in bytes */
+	size_t	space_peak_tuples;	/* peak tuple memory, in bytes */
+} hash_instrumentation;
+
 typedef struct TupleHashTableData
 {
 	tuplehash_hash *hashtab;	/* underlying hash table */
@@ -706,6 +715,7 @@ typedef struct TupleHashTableData
 	ExprState  *cur_eq_func;	/* comparator for input vs. table */
 	uint32		hash_iv;		/* hash-function IV */
 	ExprContext *exprcontext;	/* expression context */
+	hash_instrumentation instrument;
 }			TupleHashTableData;
 
 typedef tuplehash_iterator TupleHashIterator;
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index b3dcbaa..7f1c1e1 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -2356,6 +2356,7 @@ explain (costs off, timing off, summary off, analyze)
          Buckets: 1024  Batches: 1  Memory Usage: 16kB
          ->  HashAggregate (actual rows=200 loops=1)
                Group Key: onek.twothousand, onek.twothousand
+               Buckets: 256
                ->  Seq Scan on onek (actual rows=1000 loops=1)
-(10 rows)
+(11 rows)
 
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index 7348f39..092225b 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -982,9 +982,11 @@ explain (costs off, timing off, summary off, analyze) select a, b, grouping(a,b)
    Sort Method: quicksort  Memory: 25kB
    ->  HashAggregate (actual rows=8 loops=1)
          Hash Key: "*VALUES*".column1
+         Buckets: 16
          Hash Key: "*VALUES*".column2
+         Buckets: 16
          ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
-(7 rows)
+(9 rows)
 
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by cube(a,b) order by 3,1,2;
@@ -1017,11 +1019,14 @@ explain (costs off, timing off, summary off, analyze) select a, b, grouping(a,b)
    Sort Method: quicksort  Memory: 26kB
    ->  MixedAggregate (actual rows=16 loops=1)
          Hash Key: "*VALUES*".column1, "*VALUES*".column2
+         Buckets: 16
          Hash Key: "*VALUES*".column1
+         Buckets: 16
          Hash Key: "*VALUES*".column2
+         Buckets: 16
          Group Key: ()
          ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
-(9 rows)
+(12 rows)
 
 -- shouldn't try and hash
 explain (costs off, timing off, summary off, analyze)
@@ -1082,12 +1087,13 @@ explain (costs off, timing off, summary off, analyze)
    Sort Method: quicksort  Memory: 25kB
    ->  MixedAggregate (actual rows=6 loops=1)
          Hash Key: unsortable_col
+         Buckets: 256
          Group Key: unhashable_col
          ->  Sort (actual rows=8 loops=1)
                Sort Key: unhashable_col
                Sort Method: quicksort  Memory: 25kB
                ->  Seq Scan on gstest4 (actual rows=8 loops=1)
-(10 rows)
+(11 rows)
 
 select unhashable_col, unsortable_col,
        grouping(unhashable_col, unsortable_col),
@@ -1127,12 +1133,13 @@ explain (costs off, timing off, summary off, analyze)
    Sort Method: quicksort  Memory: 26kB
    ->  MixedAggregate (actual rows=16 loops=1)
          Hash Key: v, unsortable_col
+         Buckets: 256
          Group Key: v, unhashable_col
          ->  Sort (actual rows=8 loops=1)
                Sort Key: v, unhashable_col
                Sort Method: quicksort  Memory: 25kB
                ->  Seq Scan on gstest4 (actual rows=8 loops=1)
-(10 rows)
+(11 rows)
 
 -- empty input: first is 0 rows, second 1, third 3 etc.
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
@@ -1146,9 +1153,11 @@ explain (costs off, timing off, summary off, analyze)
 --------------------------------------------------------
  HashAggregate (actual rows=0 loops=1)
    Hash Key: a, b
+   Buckets: 256
    Hash Key: a
+   Buckets: 256
    ->  Seq Scan on gstest_empty (actual rows=0 loops=1)
-(4 rows)
+(6 rows)
 
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),());
  a | b | sum | count 
@@ -1170,11 +1179,12 @@ explain (costs off, timing off, summary off, analyze)
 --------------------------------------------------------
  MixedAggregate (actual rows=3 loops=1)
    Hash Key: a, b
+   Buckets: 256
    Group Key: ()
    Group Key: ()
    Group Key: ()
    ->  Seq Scan on gstest_empty (actual rows=0 loops=1)
-(6 rows)
+(7 rows)
 
 select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
  sum | count 
@@ -1215,9 +1225,11 @@ explain (costs off, timing off, summary off, analyze)
 ---------------------------------------------------
  HashAggregate (actual rows=4 loops=1)
    Hash Key: a, b
+   Buckets: 4 (originally 2)
    Hash Key: a, c
+   Buckets: 4 (originally 2)
    ->  Seq Scan on gstest3 (actual rows=2 loops=1)
-(4 rows)
+(6 rows)
 
 -- simple rescan tests
 select a, b, sum(v.x)
@@ -1245,11 +1257,13 @@ explain (costs off, timing off, summary off, analyze)
    Sort Method: quicksort  Memory: 25kB
    ->  HashAggregate (actual rows=5 loops=1)
          Hash Key: gstest_data.a
+         Buckets: 256
          Hash Key: gstest_data.b
+         Buckets: 256
          ->  Nested Loop (actual rows=6 loops=1)
                ->  Values Scan on "*VALUES*" (actual rows=2 loops=1)
                ->  Function Scan on gstest_data (actual rows=3 loops=2)
-(9 rows)
+(11 rows)
 
 select *
   from (values (1),(2)) v(x),
@@ -1302,10 +1316,13 @@ explain (costs off, timing off, summary off, analyze)
    Sort Method: quicksort  Memory: 26kB
    ->  HashAggregate (actual rows=21 loops=1)
          Hash Key: "*VALUES*".column1, "*VALUES*".column2
+         Buckets: 16
          Hash Key: ("*VALUES*".column1 + 1), ("*VALUES*".column2 + 1)
+         Buckets: 16
          Hash Key: ("*VALUES*".column1 + 2), ("*VALUES*".column2 + 2)
+         Buckets: 16
          ->  Values Scan on "*VALUES*" (actual rows=10 loops=1)
-(8 rows)
+(11 rows)
 
 select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
   from gstest2 group by cube (a,b) order by rsum, a, b;
@@ -1335,11 +1352,14 @@ explain (costs off, timing off, summary off, analyze)
                Sort Method: quicksort  Memory: 25kB
                ->  MixedAggregate (actual rows=8 loops=1)
                      Hash Key: a, b
+                     Buckets: 256
                      Hash Key: a
+                     Buckets: 256
                      Hash Key: b
+                     Buckets: 256
                      Group Key: ()
                      ->  Seq Scan on gstest2 (actual rows=9 loops=1)
-(13 rows)
+(16 rows)
 
 select a, b, sum(v.x)
   from (values (1),(2)) v(x), gstest_data(v.x)
@@ -1371,13 +1391,16 @@ explain (costs off, timing off, summary off, analyze)
    Sort Method: quicksort  Memory: 25kB
    ->  MixedAggregate (actual rows=12 loops=1)
          Hash Key: gstest_data.a, gstest_data.b
+         Buckets: 256
          Hash Key: gstest_data.a
+         Buckets: 256
          Hash Key: gstest_data.b
+         Buckets: 256
          Group Key: ()
          ->  Nested Loop (actual rows=6 loops=1)
                ->  Values Scan on "*VALUES*" (actual rows=2 loops=1)
                ->  Function Scan on gstest_data (actual rows=3 loops=2)
-(11 rows)
+(14 rows)
 
 -- Verify that we correctly handle the child node returning a
 -- non-minimal slot, which happens if the input is pre-sorted,
@@ -1572,9 +1595,13 @@ explain (costs off, timing off, summary off, analyze)
 -----------------------------------------------------------
  MixedAggregate (actual rows=13116 loops=1)
    Hash Key: two
+   Buckets: 4 (originally 2)
    Hash Key: four
+   Buckets: 4 (originally 2)
    Hash Key: ten
+   Buckets: 4
    Hash Key: hundred
+   Buckets: 16
    Group Key: unique1
    Sort Key: twothousand
      Group Key: twothousand
@@ -1584,7 +1611,7 @@ explain (costs off, timing off, summary off, analyze)
          Sort Key: unique1
          Sort Method: external merge  Disk: 392kB
          ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
-(14 rows)
+(18 rows)
 
 explain (costs off, timing off, summary off, analyze)
   select unique1,
@@ -1596,15 +1623,19 @@ explain (costs off, timing off, summary off, analyze)
 -----------------------------------------------------------
  MixedAggregate (actual rows=10116 loops=1)
    Hash Key: two
+   Buckets: 4 (originally 2)
    Hash Key: four
+   Buckets: 4 (originally 2)
    Hash Key: ten
+   Buckets: 4
    Hash Key: hundred
+   Buckets: 16
    Group Key: unique1
    ->  Sort (actual rows=10000 loops=1)
          Sort Key: unique1
          Sort Method: external merge  Disk: 392kB
          ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
-(10 rows)
+(14 rows)
 
 set work_mem = '384kB';
 explain (costs off, timing off, summary off, analyze)
@@ -1617,10 +1648,15 @@ explain (costs off, timing off, summary off, analyze)
 -----------------------------------------------------------
  MixedAggregate (actual rows=13116 loops=1)
    Hash Key: two
+   Buckets: 4 (originally 2)
    Hash Key: four
+   Buckets: 4 (originally 2)
    Hash Key: ten
+   Buckets: 4
    Hash Key: hundred
+   Buckets: 16
    Hash Key: thousand
+   Buckets: 128
    Group Key: unique1
    Sort Key: twothousand
      Group Key: twothousand
@@ -1628,7 +1664,7 @@ explain (costs off, timing off, summary off, analyze)
          Sort Key: unique1
          Sort Method: external merge  Disk: 392kB
          ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
-(13 rows)
+(18 rows)
 
 -- check collation-sensitive matching between grouping expressions
 -- (similar to a check for aggregates, but there are additional code
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 94cf969..783c1da 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -306,7 +306,9 @@ explain (costs off, timing off, summary off, analyze)
                        ->  Seq Scan on tenk2 (actual rows=8990 loops=5)
                              Filter: (thousand > 100)
                              Rows Removed by Filter: 1010
-(11 rows)
+                     Buckets: 16384
+                     Null hashtable: Buckets: 1024
+(13 rows)
 
 select count(*) from tenk1 where (two, four) not in
 	(select hundred, thousand from tenk2 where thousand > 100);
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index 55991c8..410daa0 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -791,7 +791,9 @@ select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
      ->  Append (actual rows=2 loops=1)
            ->  Result (actual rows=1 loops=1)
            ->  Result (actual rows=1 loops=1)
-(5 rows)
+   Buckets: 4 (originally 2)
+   Null hashtable: Buckets: 2
+(7 rows)
 
 select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
  ?column? 
@@ -999,7 +1001,9 @@ select * from int4_tbl where
    SubPlan 1
      ->  Index Only Scan using tenk1_unique1 on tenk1 a (actual rows=10000 loops=1)
            Heap Fetches: 0
-(8 rows)
+   Buckets: 16384
+   Null hashtable: Buckets: 2
+(10 rows)
 
 select * from int4_tbl where
   (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in
diff --git a/src/test/regress/expected/union.out b/src/test/regress/expected/union.out
index dcd51a7..d36981a 100644
--- a/src/test/regress/expected/union.out
+++ b/src/test/regress/expected/union.out
@@ -355,13 +355,14 @@ select count(*) from
  Aggregate (actual rows=1 loops=1)
    ->  Subquery Scan on ss (actual rows=5000 loops=1)
          ->  HashSetOp Intersect (actual rows=5000 loops=1)
+               Buckets: 8192
                ->  Append (actual rows=20000 loops=1)
                      ->  Subquery Scan on "*SELECT* 2" (actual rows=10000 loops=1)
                            ->  Seq Scan on tenk1 (actual rows=10000 loops=1)
                      ->  Subquery Scan on "*SELECT* 1" (actual rows=10000 loops=1)
                            ->  Index Only Scan using tenk1_unique1 on tenk1 tenk1_1 (actual rows=10000 loops=1)
                                  Heap Fetches: 0
-(9 rows)
+(10 rows)
 
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
@@ -588,12 +589,13 @@ select from generate_series(1,5) intersect select from generate_series(1,3);
                                           QUERY PLAN                                          
 ----------------------------------------------------------------------------------------------
  HashSetOp Intersect (actual rows=1 loops=1)
+   Buckets: 4 (originally 2)
    ->  Append (actual rows=8 loops=1)
          ->  Subquery Scan on "*SELECT* 1" (actual rows=5 loops=1)
                ->  Function Scan on generate_series (actual rows=5 loops=1)
          ->  Subquery Scan on "*SELECT* 2" (actual rows=3 loops=1)
                ->  Function Scan on generate_series generate_series_1 (actual rows=3 loops=1)
-(6 rows)
+(7 rows)
 
 select from generate_series(1,5) union select from generate_series(1,3);
 --
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 49d44e2..b8b1c8c 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -114,7 +114,7 @@ explain (costs off, timing off, summary off, analyze)
 select count(*) from tenk1 where (two, four) not in
 	(select hundred, thousand from tenk2 where thousand > 100);
 -- this is not parallel-safe due to use of random() within SubLink's testexpr:
-explain (costs off, timing off, summary off, analyze)
+explain (costs off)
 	select * from tenk1 where (unique1 + random())::integer not in
 	(select ten from tenk2);
 alter table tenk2 reset (parallel_workers);
-- 
2.7.4

From 9aef7906903285cedb3daae9085b97eb18fb0a22 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sat, 15 Feb 2020 14:13:06 -0600
Subject: [PATCH v2 3/7] Gross hack to put hash stats of subplans in the
 right(?) place

Pass each SubPlanState down to ExplainNode so that its hash stats are
shown under the SubPlan node itself, rather than appended after the
output of its child plan.
---
 src/backend/commands/explain.c                | 46 +++++++++++++++------------
 src/test/regress/expected/select_parallel.out |  4 +--
 src/test/regress/expected/subselect.out       |  8 ++---
 3 files changed, 32 insertions(+), 26 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index e262108..67a9840 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -66,7 +66,7 @@ static double elapsed_time(instr_time *starttime);
 static bool ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used);
 static void ExplainNode(PlanState *planstate, List *ancestors,
 						const char *relationship, const char *plan_name,
-						ExplainState *es);
+						ExplainState *es, SubPlanState *subplanstate);
 static void show_plan_tlist(PlanState *planstate, List *ancestors,
 							ExplainState *es);
 static void show_expression(Node *node, const char *qlabel,
@@ -718,7 +718,7 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
 		ps = outerPlanState(ps);
 		es->hide_workers = true;
 	}
-	ExplainNode(ps, NIL, NULL, NULL, es);
+	ExplainNode(ps, NIL, NULL, NULL, es, NULL);
 
 	/*
 	 * If requested, include information about GUC parameters with values that
@@ -1080,7 +1080,7 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
 static void
 ExplainNode(PlanState *planstate, List *ancestors,
 			const char *relationship, const char *plan_name,
-			ExplainState *es)
+			ExplainState *es, SubPlanState *subplanstate)
 {
 	Plan	   *plan = planstate->plan;
 	const char *pname;			/* node type name for text output */
@@ -1337,6 +1337,16 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			ExplainIndentText(es);
 			appendStringInfo(es->str, "%s\n", plan_name);
 			es->indent++;
+
+			Assert(subplanstate != NULL);
+			/* Show hash stats for hashed subplan */
+			if (subplanstate->hashtable)
+				show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+			if (subplanstate->hashnulls) {
+				ExplainIndentText(es);
+				appendStringInfoString(es->str, "Null hashtable: ");
+				show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+			}
 		}
 		if (es->indent)
 		{
@@ -1365,6 +1375,13 @@ ExplainNode(PlanState *planstate, List *ancestors,
 		if (custom_name)
 			ExplainPropertyText("Custom Plan Provider", custom_name, es);
 		ExplainPropertyBool("Parallel Aware", plan->parallel_aware, es);
+		if (subplanstate && subplanstate->hashtable)
+			show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+		if (subplanstate && subplanstate->hashnulls) {
+			ExplainOpenGroup("Null hashtable", "Null hashtable", true, es);
+			show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+			ExplainCloseGroup("Null hashtable", "Null hashtable", true, es);
+		}
 	}
 
 	switch (nodeTag(plan))
@@ -2037,12 +2054,12 @@ ExplainNode(PlanState *planstate, List *ancestors,
 	/* lefttree */
 	if (outerPlanState(planstate))
 		ExplainNode(outerPlanState(planstate), ancestors,
-					"Outer", NULL, es);
+					"Outer", NULL, es, NULL);
 
 	/* righttree */
 	if (innerPlanState(planstate))
 		ExplainNode(innerPlanState(planstate), ancestors,
-					"Inner", NULL, es);
+					"Inner", NULL, es, NULL);
 
 	/* special child plans */
 	switch (nodeTag(plan))
@@ -2074,7 +2091,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			break;
 		case T_SubqueryScan:
 			ExplainNode(((SubqueryScanState *) planstate)->subplan, ancestors,
-						"Subquery", NULL, es);
+						"Subquery", NULL, es, NULL);
 			break;
 		case T_CustomScan:
 			ExplainCustomChildren((CustomScanState *) planstate,
@@ -3473,7 +3490,7 @@ ExplainMemberNodes(PlanState **planstates, int nplans,
 
 	for (j = 0; j < nplans; j++)
 		ExplainNode(planstates[j], ancestors,
-					"Member", NULL, es);
+					"Member", NULL, es, NULL);
 }
 
 /*
@@ -3531,18 +3548,7 @@ ExplainSubPlans(List *plans, List *ancestors,
 		ancestors = lcons(sp, ancestors);
 
 		ExplainNode(sps->planstate, ancestors,
-					relationship, sp->plan_name, es);
-		if (sps->hashtable)
-			show_tuplehash_info(&sps->hashtable->instrument, es);
-		if (sps->hashnulls) {
-			ExplainOpenGroup("Null hashtable", "Null hashtable", true, es);
-			if (es->format == EXPLAIN_FORMAT_TEXT) {
-				ExplainIndentText(es);
-				appendStringInfoString(es->str, "Null hashtable: ");
-			}
-			show_tuplehash_info(&sps->hashnulls->instrument, es);
-			ExplainCloseGroup("Null hashtable", "Null hashtable", true, es);
-		}
+					relationship, sp->plan_name, es, sps);
 
 		ancestors = list_delete_first(ancestors);
 	}
@@ -3559,7 +3565,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es)
 	(list_length(css->custom_ps) != 1 ? "children" : "child");
 
 	foreach(cell, css->custom_ps)
-		ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es);
+		ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es, NULL);
 }
 
 /*
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 783c1da..bc270e0 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -303,11 +303,11 @@ explain (costs off, timing off, summary off, analyze)
                ->  Parallel Seq Scan on tenk1 (actual rows=2000 loops=5)
                      Filter: (NOT (hashed SubPlan 1))
                      SubPlan 1
+                       Buckets: 16384
+                       Null hashtable: Buckets: 1024
                        ->  Seq Scan on tenk2 (actual rows=8990 loops=5)
                              Filter: (thousand > 100)
                              Rows Removed by Filter: 1010
-                     Buckets: 16384
-                     Null hashtable: Buckets: 1024
 (13 rows)
 
 select count(*) from tenk1 where (two, four) not in
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index 410daa0..a6b9595 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -788,11 +788,11 @@ select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
 ----------------------------------------------
  Result (actual rows=1 loops=1)
    SubPlan 1
+     Buckets: 4 (originally 2)
+     Null hashtable: Buckets: 2
      ->  Append (actual rows=2 loops=1)
            ->  Result (actual rows=1 loops=1)
            ->  Result (actual rows=1 loops=1)
-   Buckets: 4 (originally 2)
-   Null hashtable: Buckets: 2
 (7 rows)
 
 select 'foo'::text in (select 'bar'::name union all select 'bar'::name);
@@ -999,10 +999,10 @@ select * from int4_tbl where
    ->  Seq Scan on int4_tbl (actual rows=5 loops=1)
    ->  Seq Scan on tenk1 b (actual rows=8000 loops=5)
    SubPlan 1
+     Buckets: 16384
+     Null hashtable: Buckets: 2
      ->  Index Only Scan using tenk1_unique1 on tenk1 a (actual rows=10000 loops=1)
            Heap Fetches: 0
-   Buckets: 16384
-   Null hashtable: Buckets: 2
 (10 rows)
 
 select * from int4_tbl where
-- 
2.7.4

From 86939c5c0c8c835a0cd50d4a32dda0f08413c363 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Wed, 12 Feb 2020 23:40:45 -0600
Subject: [PATCH v2 4/7] Implement hash stats for BitmapHeapScan

TIDBitmap is a private structure, so add an accessor function to return its
instrumentation, and duplicate the instrumentation struct in
BitmapHeapScanState.

The instrumentation itself could be implemented in simplehash.h.  But I think
the higher-level BitmapHeapScan would have to include an instrumentation struct
anyway, since explain.c cannot look into tbm->pagetable to get .instrument (and
the pagetable structure itself doesn't match tuplehash).

Also, if instrumentation were implemented in simplehash.h, I think every
insertion or deletion would need to check ->members and ->size (which isn't
necessary for Agg, but is necessary in the general case, and specifically for
tidbitmap, since it actually DELETEs hashtable entries).  Or else simplehash
would need a new function like UpdateTupleHashStats, which the higher-level
nodes would need to call after filling the hashtable or before deleting tuples,
which seems to defeat the purpose of implementing stats at a lower layer.
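
With this applied, EXPLAIN ANALYZE reports the pagetable stats under the
Bitmap Heap Scan node, e.g. (output sketch only; row, block and bucket
counts are illustrative):

    explain (analyze, costs off)
    select * from tenk1 where unique1 < 7000;
     Bitmap Heap Scan on tenk1 (actual rows=7000 loops=1)
       Recheck Cond: (unique1 < 7000)
       Heap Blocks: exact=345
       Buckets: 256
       ->  Bitmap Index Scan on tenk1_unique1 (actual rows=7000 loops=1)
             Index Cond: (unique1 < 7000)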
---
 src/backend/commands/explain.c            |  2 ++
 src/backend/executor/nodeBitmapHeapscan.c |  3 +++
 src/backend/nodes/tidbitmap.c             | 20 ++++++++++++++++++++
 src/include/nodes/execnodes.h             |  1 +
 src/include/nodes/tidbitmap.h             |  4 ++++
 5 files changed, 30 insertions(+)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 67a9840..d71f5f1 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -2908,6 +2908,8 @@ show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es)
 			appendStringInfoChar(es->str, '\n');
 		}
 	}
+
+	show_tuplehash_info(&planstate->instrument, es);
 }
 
 /*
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index ae8a11d..9ae99a3 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -182,6 +182,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
 #endif							/* USE_PREFETCH */
 		}
 		node->initialized = true;
+		if (node->tbm)
+			node->instrument = *tbm_instrumentation(node->tbm);
 	}
 
 	for (;;)
@@ -744,6 +746,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	scanstate->shared_tbmiterator = NULL;
 	scanstate->shared_prefetch_iterator = NULL;
 	scanstate->pstate = NULL;
+	memset(&scanstate->instrument, 0, sizeof(scanstate->instrument));
 
 	/*
 	 * We can potentially skip fetching heap pages if we do not need any
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index e102589..d1ef07c 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -43,6 +43,7 @@
 #include "access/htup_details.h"
 #include "nodes/bitmapset.h"
 #include "nodes/tidbitmap.h"
+#include "nodes/execnodes.h"
 #include "storage/lwlock.h"
 #include "utils/dsa.h"
 #include "utils/hashutils.h"
@@ -166,6 +167,7 @@ struct TIDBitmap
 	dsa_pointer ptpages;		/* dsa_pointer to the page array */
 	dsa_pointer ptchunks;		/* dsa_pointer to the chunk array */
 	dsa_area   *dsa;			/* reference to per-query dsa area */
+	hash_instrumentation instrument;	/* Returned by accessor function */
 };
 
 /*
@@ -294,6 +296,7 @@ tbm_create_pagetable(TIDBitmap *tbm)
 	Assert(tbm->pagetable == NULL);
 
 	tbm->pagetable = pagetable_create(tbm->mcxt, 128, tbm);
+	tbm->instrument.nbuckets_original = tbm->pagetable->size;
 
 	/* If entry1 is valid, push it into the hashtable */
 	if (tbm->status == TBM_ONE_PAGE)
@@ -1148,6 +1151,23 @@ tbm_end_iterate(TBMIterator *iterator)
 }
 
 /*
+ * tbm_instrumentation - return a pointer to instrumentation data
+ *
+ * The returned data lives within the tbm and is destroyed along with it.
+ */
+hash_instrumentation *
+tbm_instrumentation(TIDBitmap *tbm)
+{
+	if (tbm->pagetable) {
+		tbm->instrument.nbuckets = tbm->pagetable->size;
+		tbm->instrument.space_peak_hash = sizeof(PagetableEntry) * tbm->pagetable->size;
+		tbm->instrument.space_peak_tuples = sizeof(BlockNumber) * (tbm->nchunks ? tbm->maxentries : tbm->pagetable->members);
+	}
+
+	return &tbm->instrument;
+}
+
+/*
  * tbm_end_shared_iterate - finish a shared iteration over a TIDBitmap
  *
  * This doesn't free any of the shared state associated with the iterator,
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index f929585..f5740c7 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1607,6 +1607,7 @@ typedef struct BitmapHeapScanState
 	TBMSharedIterator *shared_tbmiterator;
 	TBMSharedIterator *shared_prefetch_iterator;
 	ParallelBitmapHeapState *pstate;
+	hash_instrumentation	instrument;
 } BitmapHeapScanState;
 
 /* ----------------
diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h
index d562fca..811a497 100644
--- a/src/include/nodes/tidbitmap.h
+++ b/src/include/nodes/tidbitmap.h
@@ -26,6 +26,9 @@
 #include "utils/dsa.h"
 
 
+/* Forward decl */
+typedef struct hash_instrumentation hash_instrumentation;
+
 /*
  * Actual bitmap representation is private to tidbitmap.c.  Callers can
  * do IsA(x, TIDBitmap) on it, but nothing else.
@@ -71,5 +74,6 @@ extern void tbm_end_shared_iterate(TBMSharedIterator *iterator);
 extern TBMSharedIterator *tbm_attach_shared_iterate(dsa_area *dsa,
 													dsa_pointer dp);
 extern long tbm_calculate_entries(double maxbytes);
+extern hash_instrumentation *tbm_instrumentation(TIDBitmap *tbm);
 
 #endif							/* TIDBITMAP_H */
-- 
2.7.4

From ab4b5443fe597beafc4a28b0a8c5817f4670b67b Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 9 Feb 2020 15:08:14 -0600
Subject: [PATCH v2 5/7] Refactor for consistency/symmetry

This moves hash instrumentation out of execGrouping.c / TupleHashTable and into
the higher-level nodes, for consistency with BitmapHeapScan.
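
Each node now follows the same call pattern (sketch; "state" stands for
the particular node's state struct, and the last argument of Init is the
per-entry additionalsize, zero for most nodes):

	/* once, when the hashtable is first built */
	InitTupleHashTableStats(state->instrument, state->hashtable->hashtab, 0);

	/* after filling the table, before walking it */
	UpdateTupleHashTableStats(state->instrument, state->hashtable->hashtab);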
---
 src/backend/commands/explain.c            | 18 +++++++++---------
 src/backend/executor/execGrouping.c       | 27 ---------------------------
 src/backend/executor/nodeAgg.c            |  9 +++++++--
 src/backend/executor/nodeRecursiveunion.c |  3 ++-
 src/backend/executor/nodeSetOp.c          |  3 ++-
 src/backend/executor/nodeSubplan.c        | 11 ++++++++---
 src/include/executor/executor.h           |  1 -
 src/include/executor/nodeAgg.h            |  1 +
 src/include/nodes/execnodes.h             | 22 +++++++++++++++++++++-
 9 files changed, 50 insertions(+), 45 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index d71f5f1..1415bce 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -1341,11 +1341,11 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			Assert(subplanstate != NULL);
 			/* Show hash stats for hashed subplan */
 			if (subplanstate->hashtable)
-				show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+				show_tuplehash_info(&subplanstate->instrument, es);
 			if (subplanstate->hashnulls) {
 				ExplainIndentText(es);
 				appendStringInfoString(es->str, "Null hashtable: ");
-				show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+				show_tuplehash_info(&subplanstate->instrument_nulls, es);
 			}
 		}
 		if (es->indent)
@@ -1376,10 +1376,10 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			ExplainPropertyText("Custom Plan Provider", custom_name, es);
 		ExplainPropertyBool("Parallel Aware", plan->parallel_aware, es);
 		if (subplanstate && subplanstate->hashtable)
-			show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+			show_tuplehash_info(&subplanstate->instrument, es);
 		if (subplanstate && subplanstate->hashnulls) {
 			ExplainOpenGroup("Null hashtable", "Null hashtable", true, es);
-			show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+			show_tuplehash_info(&subplanstate->instrument_nulls, es);
 			ExplainCloseGroup("Null hashtable", "Null hashtable", true, es);
 		}
 	}
@@ -1911,14 +1911,14 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			{
 				SetOpState *sos = castNode(SetOpState, planstate);
 				if (sos->hashtable)
-					show_tuplehash_info(&sos->hashtable->instrument, es);
+					show_tuplehash_info(&sos->instrument, es);
 			}
 			break;
 		case T_RecursiveUnion:
 			{
 				RecursiveUnionState *rus = (RecursiveUnionState *)planstate;
 				if (rus->hashtable)
-					show_tuplehash_info(&rus->hashtable->instrument, es);
+					show_tuplehash_info(&rus->instrument, es);
 				break;
 			}
 		case T_Group:
@@ -2305,7 +2305,7 @@ show_agg_keys(AggState *astate, List *ancestors,
 								 ancestors, es);
 			Assert(astate->num_hashes <= 1);
 			if (astate->num_hashes)
-				show_tuplehash_info(&astate->perhash[0].hashtable->instrument, es);
+				show_tuplehash_info(&astate->perhash[0].instrument, es);
 		}
 
 		ancestors = list_delete_first(ancestors);
@@ -2332,7 +2332,7 @@ show_grouping_sets(AggState *aggstate, Agg *agg,
 	/* XXX: will this show things twice? */
 	show_grouping_set_keys(aggstate, agg, NULL,
 						   context, useprefix, ancestors, es,
-						   aggstate->num_hashes ? &aggstate->perhash[0].hashtable->instrument : NULL);
+						   aggstate->num_hashes ? &aggstate->perhash[0].instrument : NULL);
 
 	foreach(lc, agg->chain)
 	{
@@ -2344,7 +2344,7 @@ show_grouping_sets(AggState *aggstate, Agg *agg,
 				aggnode->aggstrategy == AGG_MIXED) {
 			int	nth = list_cell_number(agg->chain, lc);
 			Assert(nth < aggstate->num_hashes);
-			inst = &aggstate->perhash[nth].hashtable->instrument;
+			inst = &aggstate->perhash[nth].instrument;
 		}
 		else
 			inst = NULL;
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 5641b3f..de0205f 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -191,7 +191,6 @@ BuildTupleHashTableExt(PlanState *parent,
 	hashtable->inputslot = NULL;
 	hashtable->in_hash_funcs = NULL;
 	hashtable->cur_eq_func = NULL;
-	memset(&hashtable->instrument, 0, sizeof(hashtable->instrument));
 
 	/*
 	 * If parallelism is in use, even if the master backend is performing the
@@ -207,7 +206,6 @@ BuildTupleHashTableExt(PlanState *parent,
 		hashtable->hash_iv = 0;
 
 	hashtable->hashtab = tuplehash_create(metacxt, nbuckets, hashtable);
-	UpdateTupleHashTableStats(hashtable, true);
 
 	/*
 	 * We copy the input tuple descriptor just for safety --- we assume all
@@ -286,34 +284,9 @@ BuildTupleHashTable(PlanState *parent,
 void
 ResetTupleHashTable(TupleHashTable hashtable)
 {
-	UpdateTupleHashTableStats(hashtable, false);
 	tuplehash_reset(hashtable->hashtab);
 }
 
-/* Update instrumentation stats */
-void
-UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial)
-{
-	hashtable->instrument.nbuckets = hashtable->hashtab->size;
-	if (initial) {
-		hashtable->instrument.nbuckets_original = hashtable->hashtab->size;
-		hashtable->instrument.space_peak_hash = hashtable->hashtab->size * sizeof(TupleHashEntryData);
-		hashtable->instrument.space_peak_tuples = 0;
-	}
-	else
-	{
-#define maxself(a,b) a=Max(a,b)
-		/* hashtable->entrysize includes additionalsize */
-		maxself(hashtable->instrument.space_peak_hash,
-				hashtable->hashtab->size * sizeof(TupleHashEntryData) +
-				hashtable->hashtab->members * (hashtable->entrysize - sizeof(TupleHashEntryData)));
-
-		maxself(hashtable->instrument.space_peak_tuples,
-				hashtable->hashtab->members * sizeof(MinimalTuple));
-#undef maxself
-	}
-}
-
 /*
  * Find or create a hashtable entry for the tuple group containing the
  * given tuple.  The tuple must be the same type as the hashtable entries.
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 170dfc7..4008e27 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1268,6 +1268,7 @@ build_hash_table(AggState *aggstate)
 		if (perhash->hashtable)
 			ResetTupleHashTable(perhash->hashtable);
 		else
+		{
 			perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
 														perhash->hashslot->tts_tupleDescriptor,
 														perhash->numCols,
@@ -1281,6 +1282,8 @@ build_hash_table(AggState *aggstate)
 														aggstate->hashcontext->ecxt_per_tuple_memory,
 														tmpmem,
 														DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+			InitTupleHashTableStats(perhash->instrument, perhash->hashtable->hashtab, additionalsize);
+		}
 	}
 }
 
@@ -1666,7 +1669,7 @@ agg_retrieve_direct(AggState *aggstate)
 				 */
 				initialize_phase(aggstate, 0);
 				aggstate->table_filled = true;
-				UpdateTupleHashTableStats(aggstate->perhash[0].hashtable, false);
+				UpdateTupleHashTableStats(aggstate->perhash[0].instrument, aggstate->perhash[0].hashtable->hashtab);
 				ResetTupleHashIterator(aggstate->perhash[0].hashtable,
 									   &aggstate->perhash[0].hashiter);
 				select_current_set(aggstate, 0, true);
@@ -1910,6 +1913,7 @@ agg_fill_hash_table(AggState *aggstate)
 {
 	TupleTableSlot *outerslot;
 	ExprContext *tmpcontext = aggstate->tmpcontext;
+	AggStatePerHash	perhash = &aggstate->perhash[aggstate->current_set];
 
 	/*
 	 * Process each outer-plan tuple, and then fetch the next one, until we
@@ -1938,7 +1942,8 @@ agg_fill_hash_table(AggState *aggstate)
 	}
 
 	aggstate->table_filled = true;
-	UpdateTupleHashTableStats(aggstate->perhash[aggstate->current_set].hashtable, false);
+	UpdateTupleHashTableStats(perhash->instrument, perhash->hashtable->hashtab);
+
 	/* Initialize to walk the first hash table */
 	select_current_set(aggstate, 0, true);
 	ResetTupleHashIterator(aggstate->perhash[0].hashtable,
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 93272c2..594abdb 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -50,6 +50,7 @@ build_hash_table(RecursiveUnionState *rustate)
 												rustate->tableContext,
 												rustate->tempContext,
 												false);
+	InitTupleHashTableStats(rustate->instrument, rustate->hashtable->hashtab, 0);
 }
 
 
@@ -157,7 +158,7 @@ ExecRecursiveUnion(PlanState *pstate)
 	}
 
 	if (node->hashtable)
-		UpdateTupleHashTableStats(node->hashtable, false);
+		UpdateTupleHashTableStats(node->instrument, node->hashtable->hashtab);
 
 	return NULL;
 }
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 9c0e0ab..4a56290 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -139,6 +139,7 @@ build_hash_table(SetOpState *setopstate)
 												   setopstate->tableContext,
 												   econtext->ecxt_per_tuple_memory,
 												   false);
+	InitTupleHashTableStats(setopstate->instrument, setopstate->hashtable->hashtab, 0);
 }
 
 /*
@@ -415,7 +416,7 @@ setop_fill_hash_table(SetOpState *setopstate)
 
 	setopstate->table_filled = true;
 	/* Initialize to walk the hash table */
-	UpdateTupleHashTableStats(setopstate->hashtable, false);
+	UpdateTupleHashTableStats(setopstate->instrument, setopstate->hashtable->hashtab);
 	ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
 }
 
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index eec849c..a5b71fa 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -507,6 +507,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	if (node->hashtable)
 		ResetTupleHashTable(node->hashtable);
 	else
+	{
 		node->hashtable = BuildTupleHashTableExt(node->parent,
 												 node->descRight,
 												 ncols,
@@ -520,6 +521,8 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 												 node->hashtablecxt,
 												 node->hashtempcxt,
 												 false);
+		InitTupleHashTableStats(node->instrument, node->hashtable->hashtab, 0);
+	}
 
 	if (!subplan->unknownEqFalse)
 	{
@@ -534,7 +537,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 
 		if (node->hashnulls)
 			ResetTupleHashTable(node->hashtable);
-		else
+		else {
 			node->hashnulls = BuildTupleHashTableExt(node->parent,
 													 node->descRight,
 													 ncols,
@@ -548,6 +551,8 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 													 node->hashtablecxt,
 													 node->hashtempcxt,
 													 false);
+			InitTupleHashTableStats(node->instrument_nulls, node->hashnulls->hashtab, 0);
+		}
 	}
 
 	/*
@@ -621,9 +626,9 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	ExecClearTuple(node->projRight->pi_state.resultslot);
 
 	MemoryContextSwitchTo(oldcontext);
-	UpdateTupleHashTableStats(node->hashtable, false);
+	UpdateTupleHashTableStats(node->instrument, node->hashtable->hashtab);
 	if (node->hashnulls)
-		UpdateTupleHashTableStats(node->hashnulls, false);
+		UpdateTupleHashTableStats(node->instrument_nulls, node->hashnulls->hashtab);
 }
 
 /*
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 34199b5..81fdfa4 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -150,7 +150,6 @@ extern TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable,
 										 ExprState *eqcomp,
 										 FmgrInfo *hashfunctions);
 extern void ResetTupleHashTable(TupleHashTable hashtable);
-extern void UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial);
 
 /*
  * prototypes from functions in execJunk.c
diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h
index 264916f..008fda3 100644
--- a/src/include/executor/nodeAgg.h
+++ b/src/include/executor/nodeAgg.h
@@ -302,6 +302,7 @@ typedef struct AggStatePerHashData
 	AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */
 	AttrNumber *hashGrpColIdxHash;	/* indices in hash table tuples */
 	Agg		   *aggnode;		/* original Agg node, for numGroups etc. */
+	hash_instrumentation    instrument;
 }			AggStatePerHashData;
 
 
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index f5740c7..f8c93dd 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -688,9 +688,26 @@ typedef struct TupleHashEntryData
 #define SH_DECLARE
 #include "lib/simplehash.h"
 
+#define InitTupleHashTableStats(instr, htable, addsize) \
+	do { \
+		(instr).additionalsize = (addsize); \
+		(instr).nbuckets = (htable)->size; \
+		(instr).nbuckets_original = (htable)->size; \
+		(instr).space_peak_hash = (htable)->size * sizeof(TupleHashEntryData); \
+		(instr).space_peak_tuples = 0; \
+	} while (0)
+
+#define UpdateTupleHashTableStats(instr, htable) \
+	do { \
+		(instr).nbuckets = (htable)->size; \
+		(instr).space_peak_hash = Max((instr).space_peak_hash, (htable)->size * sizeof(TupleHashEntryData) + (htable)->members * (instr).additionalsize); \
+		(instr).space_peak_tuples = Max((instr).space_peak_tuples, (htable)->members * sizeof(MinimalTuple)); \
+	} while (0)
+
 /* XXX: not to be confused with struct HashInstrumentation... */
 typedef struct hash_instrumentation
 {
+	size_t	additionalsize;			/* extra per-entry size beyond TupleHashEntryData */
 	size_t	nbuckets;				/* number of buckets at end of execution */
 	size_t	nbuckets_original;		/* planned number of buckets */
 	size_t	space_peak_hash;	/* peak hash table memory, in bytes */
@@ -715,7 +732,6 @@ typedef struct TupleHashTableData
 	ExprState  *cur_eq_func;	/* comparator for input vs. table */
 	uint32		hash_iv;		/* hash-function IV */
 	ExprContext *exprcontext;	/* expression context */
-	hash_instrumentation instrument;
 }			TupleHashTableData;
 
 typedef tuplehash_iterator TupleHashIterator;
@@ -881,6 +897,8 @@ typedef struct SubPlanState
 	FmgrInfo   *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
 	FmgrInfo   *cur_eq_funcs;	/* equality functions for LHS vs. table */
 	ExprState  *cur_eq_comp;	/* equality comparator for LHS vs. table */
+	hash_instrumentation instrument;
+	hash_instrumentation instrument_nulls; /* instrumentation for nulls hashtable */
 } SubPlanState;
 
 /* ----------------
@@ -1289,6 +1307,7 @@ typedef struct RecursiveUnionState
 	MemoryContext tempContext;	/* short-term context for comparisons */
 	TupleHashTable hashtable;	/* hash table for tuples already seen */
 	MemoryContext tableContext; /* memory context containing hash table */
+	hash_instrumentation	instrument;
 } RecursiveUnionState;
 
 /* ----------------
@@ -2322,6 +2341,7 @@ typedef struct SetOpState
 	MemoryContext tableContext; /* memory context containing hash table */
 	bool		table_filled;	/* hash table filled yet? */
 	TupleHashIterator hashiter; /* for iterating through hash table */
+	hash_instrumentation instrument;
 } SetOpState;
 
 /* ----------------
-- 
2.7.4

From 8722ffb2d673bd90c186ae45b3d450456e41fdfd Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sat, 15 Feb 2020 17:19:21 -0600
Subject: [PATCH v2 6/7] TupleHashTable.entrysize was unused except for
 instrumentation; remove it

Since the previous patch moved the instrumentation out of execGrouping.c,
nothing reads entrysize anymore, so remove it.
---
 src/backend/executor/execGrouping.c | 1 -
 src/include/nodes/execnodes.h       | 1 -
 2 files changed, 2 deletions(-)

diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index de0205f..d76a630 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -186,7 +186,6 @@ BuildTupleHashTableExt(PlanState *parent,
 	hashtable->tab_collations = collations;
 	hashtable->tablecxt = tablecxt;
 	hashtable->tempcxt = tempcxt;
-	hashtable->entrysize = entrysize;
 	hashtable->tableslot = NULL;	/* will be made on first lookup */
 	hashtable->inputslot = NULL;
 	hashtable->in_hash_funcs = NULL;
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index f8c93dd..2bcd140 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -724,7 +724,6 @@ typedef struct TupleHashTableData
 	Oid		   *tab_collations; /* collations for hash and comparison */
 	MemoryContext tablecxt;		/* memory context containing table */
 	MemoryContext tempcxt;		/* context for function evaluations */
-	Size		entrysize;		/* actual size to make each hash entry */
 	TupleTableSlot *tableslot;	/* slot for referencing table entries */
 	/* The following fields are set transiently for each table search: */
 	TupleTableSlot *inputslot;	/* current input tuple's slot */
-- 
2.7.4

From 11cc4891d85fb9fd5dd157f3cd808f1494f580d9 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sat, 15 Feb 2020 15:53:34 -0600
Subject: [PATCH v2 7/7] Update comment obsolete since 69c3936a

---
 src/backend/executor/nodeAgg.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 4008e27..ab97a35 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1499,8 +1499,7 @@ lookup_hash_entry(AggState *aggstate)
 }
 
 /*
- * Look up hash entries for the current tuple in all hashed grouping sets,
- * returning an array of pergroup pointers suitable for advance_aggregates.
+ * Look up hash entries for the current tuple in all hashed grouping sets.
  *
  * Be aware that lookup_hash_entry can reset the tmpcontext.
  */
-- 
2.7.4
