Updated for new tests in 58c47ccfff20b8c125903482725c1dbfd30beade
and rebased.
From 3e1904c6c36ee3ff4f56a2808c2400a3b2d2a0e5 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Tue, 31 Dec 2019 18:49:41 -0600
Subject: [PATCH v6 1/7] explain to show tuplehash bucket and memory stats.

Note that hashed SubPlan and RecursiveUnion nodes aren't affected in EXPLAIN
output, probably because their hashtables aren't allocated at that point.
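
For example, in text format each hashed node gets a line like the following
(the numbers shown here are only illustrative):

    HashAggregate
      Group Key: two
      Buckets: 4 (originally 2)  Memory Usage: hashtable: 1kB, tuples: 1kB

The "(originally N)" part appears only when the bucket count changed from the
initial size, and the "Memory Usage" part only under ANALYZE; non-text formats
emit the "Hash Buckets", "Original Hash Buckets", and "Peak Memory Usage
(hashtable)"/"(tuples)" properties instead.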

Discussion: https://www.postgresql.org/message-id/flat/20200103161925.gm12...@telsasoft.com
---
 .../postgres_fdw/expected/postgres_fdw.out    |  56 +++++--
 src/backend/commands/explain.c                | 137 +++++++++++++++--
 src/backend/executor/execGrouping.c           |  27 ++++
 src/backend/executor/nodeAgg.c                |  11 ++
 src/backend/executor/nodeRecursiveunion.c     |   3 +
 src/backend/executor/nodeSetOp.c              |   1 +
 src/backend/executor/nodeSubplan.c            |   3 +
 src/include/executor/executor.h               |   1 +
 src/include/nodes/execnodes.h                 |   9 ++
 src/test/regress/expected/aggregates.out      |  36 +++--
 src/test/regress/expected/groupingsets.out    |  64 ++++++--
 src/test/regress/expected/join.out            |   3 +-
 src/test/regress/expected/matview.out         |   9 +-
 .../regress/expected/partition_aggregate.out  | 145 ++++++++++++++----
 src/test/regress/expected/partition_join.out  |  13 +-
 src/test/regress/expected/pg_lsn.out          |   3 +-
 src/test/regress/expected/select_distinct.out |   3 +-
 src/test/regress/expected/select_parallel.out |  11 +-
 src/test/regress/expected/subselect.out       |   9 +-
 src/test/regress/expected/tablesample.out     |   3 +-
 src/test/regress/expected/union.out           |  15 +-
 src/test/regress/expected/window.out          |   3 +-
 src/test/regress/expected/write_parallel.out  |  16 +-
 23 files changed, 473 insertions(+), 108 deletions(-)

diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 62c2697920..2ddae83178 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -2086,9 +2086,11 @@ SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2
          ->  HashAggregate
                Output: t1.c1, avg((t1.c1 + t2.c1))
                Group Key: t1.c1
+               Buckets: 256
                ->  HashAggregate
                      Output: t1.c1, t2.c1
                      Group Key: t1.c1, t2.c1
+                     Buckets: 4096
                      ->  Append
                            ->  Foreign Scan
                                  Output: t1.c1, t2.c1
@@ -2098,7 +2100,7 @@ SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2
                                  Output: t1_1.c1, t2_1.c1
                                  Relations: (public.ft1 t1_1) INNER JOIN (public.ft2 t2_1)
                                  Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1"))))
-(20 rows)
+(22 rows)
 
 SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) UNION SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1)) AS t (t1c1, t2c1) GROUP BY t1c1 ORDER BY t1c1 OFFSET 100 LIMIT 10;
  t1c1 |         avg          
@@ -2129,11 +2131,12 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM
          ->  HashAggregate
                Output: t2.c1, t3.c1
                Group Key: t2.c1, t3.c1
+               Buckets: 2
                ->  Foreign Scan
                      Output: t2.c1, t3.c1
                      Relations: (public.ft1 t2) INNER JOIN (public.ft2 t3)
                      Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r1.c2 = $1::integer))))
-(13 rows)
+(14 rows)
 
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
  C 1 
@@ -2610,10 +2613,11 @@ select c2 * (random() <= 1)::int as c2 from ft2 group by c2 * (random() <= 1)::i
    ->  HashAggregate
          Output: ((c2 * ((random() <= '1'::double precision))::integer))
          Group Key: (ft2.c2 * ((random() <= '1'::double precision))::integer)
+         Buckets: 2
          ->  Foreign Scan on public.ft2
                Output: (c2 * ((random() <= '1'::double precision))::integer)
                Remote SQL: SELECT c2 FROM "S 1"."T 1"
-(9 rows)
+(10 rows)
 
 -- GROUP BY clause in various forms, cardinal, alias and constant expression
 explain (verbose, costs off)
@@ -2713,11 +2717,12 @@ select sum(c1) from ft1 group by c2 having avg(c1 * (random() <= 1)::int) > 100
    ->  HashAggregate
          Output: sum(c1), c2
          Group Key: ft1.c2
+         Buckets: 16
          Filter: (avg((ft1.c1 * ((random() <= '1'::double precision))::integer)) > '100'::numeric)
          ->  Foreign Scan on public.ft1
                Output: c1, c2
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1"
-(10 rows)
+(11 rows)
 
 -- Remote aggregate in combination with a local Param (for the output
 -- of an initplan) can be trouble, per bug #15781
@@ -2963,10 +2968,11 @@ select sum(c1) filter (where (c1 / c1) * random() <= 1) from ft1 group by c2 ord
    ->  HashAggregate
          Output: sum(c1) FILTER (WHERE ((((c1 / c1))::double precision * random()) <= '1'::double precision)), c2
          Group Key: ft1.c2
+         Buckets: 16
          ->  Foreign Scan on public.ft1
                Output: c1, c2
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1"
-(9 rows)
+(10 rows)
 
 explain (verbose, costs off)
 select sum(c2) filter (where c2 in (select c2 from ft1 where c2 < 5)) from ft1;
@@ -3229,6 +3235,7 @@ select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x w
    ->  HashAggregate
          Output: count(*), x.b
          Group Key: x.b
+         Buckets: 16
          ->  Hash Join
                Output: x.b
                Inner Unique: true
@@ -3244,7 +3251,7 @@ select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x w
                                  Output: ft1_1.c2, (sum(ft1_1.c1))
                                  Relations: Aggregate on (public.ft1 ft1_1)
                                  Remote SQL: SELECT c2, sum("C 1") FROM "S 1"."T 1" GROUP BY 1
-(21 rows)
+(22 rows)
 
 select count(*), x.b from ft1, (select c2 a, sum(c1) b from ft1 group by c2) x where ft1.c2 = x.a group by x.b order by 1, 2;
  count |   b   
@@ -3449,11 +3456,12 @@ select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls la
    ->  MixedAggregate
          Output: c2, sum(c1)
          Hash Key: ft1.c2
+         Buckets: 16
          Group Key: ()
          ->  Foreign Scan on public.ft1
                Output: c2, c1
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+(11 rows)
 
 select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
  c2 |  sum   
@@ -3474,11 +3482,12 @@ select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last
    ->  MixedAggregate
          Output: c2, sum(c1)
          Hash Key: ft1.c2
+         Buckets: 16
          Group Key: ()
          ->  Foreign Scan on public.ft1
                Output: c2, c1
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+(11 rows)
 
 select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
  c2 |  sum   
@@ -3499,11 +3508,13 @@ select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) orde
    ->  HashAggregate
          Output: c2, c6, sum(c1)
          Hash Key: ft1.c2
+         Buckets: 16
          Hash Key: ft1.c6
+         Buckets: 16
          ->  Foreign Scan on public.ft1
                Output: c2, c6, c1
                Remote SQL: SELECT "C 1", c2, c6 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+(12 rows)
 
 select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
  c2 | c6 |  sum  
@@ -3526,10 +3537,11 @@ select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nu
    ->  HashAggregate
          Output: c2, sum(c1), GROUPING(c2)
          Group Key: ft1.c2
+         Buckets: 16
          ->  Foreign Scan on public.ft1
                Output: c2, c1
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(9 rows)
+(10 rows)
 
 select c2, sum(c1), grouping(c2) from ft1 where c2 < 3 group by c2 order by 1 nulls last;
  c2 |  sum  | grouping 
@@ -7147,13 +7159,14 @@ select * from bar where f1 in (select f1 from foo) for update;
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
                            ->  Foreign Scan on public.foo2 foo_2
                                  Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
                                  Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-(23 rows)
+(24 rows)
 
 select * from bar where f1 in (select f1 from foo) for update;
  f1 | f2 
@@ -7185,13 +7198,14 @@ select * from bar where f1 in (select f1 from foo) for share;
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
                            ->  Foreign Scan on public.foo2 foo_2
                                  Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
                                  Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-(23 rows)
+(24 rows)
 
 select * from bar where f1 in (select f1 from foo) for share;
  f1 | f2 
@@ -7222,6 +7236,7 @@ update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
@@ -7240,13 +7255,14 @@ update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
                ->  HashAggregate
                      Output: foo.ctid, foo.f1, foo.*, foo.tableoid
                      Group Key: foo.f1
+                     Buckets: 256
                      ->  Append
                            ->  Seq Scan on public.foo foo_1
                                  Output: foo_1.ctid, foo_1.f1, foo_1.*, foo_1.tableoid
                            ->  Foreign Scan on public.foo2 foo_2
                                  Output: foo_2.ctid, foo_2.f1, foo_2.*, foo_2.tableoid
                                  Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1
-(39 rows)
+(41 rows)
 
 update bar set f2 = f2 + 100 where f1 in (select f1 from foo);
 select tableoid::regclass, * from bar order by 1,2;
@@ -8751,12 +8767,13 @@ SELECT a, sum(b), min(b), count(*) FROM pagg_tab GROUP BY a HAVING avg(b) < 22 O
    Sort Key: pagg_tab.a
    ->  HashAggregate
          Group Key: pagg_tab.a
+         Buckets: 64
          Filter: (avg(pagg_tab.b) < '22'::numeric)
          ->  Append
                ->  Foreign Scan on fpagg_tab_p1 pagg_tab_1
                ->  Foreign Scan on fpagg_tab_p2 pagg_tab_2
                ->  Foreign Scan on fpagg_tab_p3 pagg_tab_3
-(9 rows)
+(10 rows)
 
 -- Plan with partitionwise aggregates is enabled
 SET enable_partitionwise_aggregate TO true;
@@ -8799,6 +8816,7 @@ SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
          ->  HashAggregate
                Output: t1.a, count(((t1.*)::pagg_tab))
                Group Key: t1.a
+               Buckets: 16
                Filter: (avg(t1.b) < '22'::numeric)
                ->  Foreign Scan on public.fpagg_tab_p1 t1
                      Output: t1.a, t1.*, t1.b
@@ -8806,6 +8824,7 @@ SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
          ->  HashAggregate
                Output: t1_1.a, count(((t1_1.*)::pagg_tab))
                Group Key: t1_1.a
+               Buckets: 16
                Filter: (avg(t1_1.b) < '22'::numeric)
                ->  Foreign Scan on public.fpagg_tab_p2 t1_1
                      Output: t1_1.a, t1_1.*, t1_1.b
@@ -8813,11 +8832,12 @@ SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
          ->  HashAggregate
                Output: t1_2.a, count(((t1_2.*)::pagg_tab))
                Group Key: t1_2.a
+               Buckets: 16
                Filter: (avg(t1_2.b) < '22'::numeric)
                ->  Foreign Scan on public.fpagg_tab_p3 t1_2
                      Output: t1_2.a, t1_2.*, t1_2.b
                      Remote SQL: SELECT a, b, c FROM public.pagg_tab_p3
-(25 rows)
+(28 rows)
 
 SELECT a, count(t1) FROM pagg_tab t1 GROUP BY a HAVING avg(b) < 22 ORDER BY 1;
  a  | count 
@@ -8839,18 +8859,22 @@ SELECT b, avg(a), max(a), count(*) FROM pagg_tab GROUP BY b HAVING sum(a) < 700
    Sort Key: pagg_tab.b
    ->  Finalize HashAggregate
          Group Key: pagg_tab.b
+         Buckets: 64
          Filter: (sum(pagg_tab.a) < 700)
          ->  Append
                ->  Partial HashAggregate
                      Group Key: pagg_tab.b
+                     Buckets: 64
                      ->  Foreign Scan on fpagg_tab_p1 pagg_tab
                ->  Partial HashAggregate
                      Group Key: pagg_tab_1.b
+                     Buckets: 64
                      ->  Foreign Scan on fpagg_tab_p2 pagg_tab_1
                ->  Partial HashAggregate
                      Group Key: pagg_tab_2.b
+                     Buckets: 64
                      ->  Foreign Scan on fpagg_tab_p3 pagg_tab_2
-(15 rows)
+(19 rows)
 
 -- ===================================================================
 -- access rights and superuser
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index d901dc4a50..ff22181510 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -18,6 +18,7 @@
 #include "commands/createas.h"
 #include "commands/defrem.h"
 #include "commands/prepare.h"
+#include "executor/nodeAgg.h"
 #include "executor/nodeHash.h"
 #include "foreign/fdwapi.h"
 #include "jit/jit.h"
@@ -86,12 +87,14 @@ static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
 								   ExplainState *es);
 static void show_agg_keys(AggState *astate, List *ancestors,
 						  ExplainState *es);
-static void show_grouping_sets(PlanState *planstate, Agg *agg,
+static void show_grouping_sets(AggState *aggstate, Agg *agg,
 							   List *ancestors, ExplainState *es);
-static void show_grouping_set_keys(PlanState *planstate,
+static void show_grouping_set_info(AggState *aggstate,
 								   Agg *aggnode, Sort *sortnode,
 								   List *context, bool useprefix,
-								   List *ancestors, ExplainState *es);
+								   List *ancestors,
+								   HashTableInstrumentation *inst,
+								   ExplainState *es);
 static void show_group_keys(GroupState *gstate, List *ancestors,
 							ExplainState *es);
 static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
@@ -104,6 +107,7 @@ static void show_tablesample(TableSampleClause *tsc, PlanState *planstate,
 							 List *ancestors, ExplainState *es);
 static void show_sort_info(SortState *sortstate, ExplainState *es);
 static void show_hash_info(HashState *hashstate, ExplainState *es);
+static void show_tuplehash_info(HashTableInstrumentation *inst, ExplainState *es);
 static void show_tidbitmap_info(BitmapHeapScanState *planstate,
 								ExplainState *es);
 static void show_instrumentation_count(const char *qlabel, int which,
@@ -1489,6 +1493,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 					appendStringInfo(es->str, " %s", setopcmd);
 				else
 					ExplainPropertyText("Command", setopcmd, es);
+				/* TODO: show strategy in text mode? */
 			}
 			break;
 		default:
@@ -1886,6 +1891,20 @@ ExplainNode(PlanState *planstate, List *ancestors,
 				show_instrumentation_count("Rows Removed by Filter", 1,
 										   planstate, es);
 			break;
+		case T_SetOp:
+			{
+				SetOpState *sos = castNode(SetOpState, planstate);
+				if (sos->hashtable)
+					show_tuplehash_info(&sos->hashtable->instrument, es);
+			}
+			break;
+		case T_RecursiveUnion:
+			{
+				RecursiveUnionState *rus = castNode(RecursiveUnionState, planstate);
+				if (rus->hashtable)
+					show_tuplehash_info(&rus->hashtable->instrument, es);
+			}
+			break;
 		case T_Group:
 			show_group_keys(castNode(GroupState, planstate), ancestors, es);
 			show_upper_qual(plan->qual, "Filter", planstate, ancestors, es);
@@ -2262,24 +2281,31 @@ show_agg_keys(AggState *astate, List *ancestors,
 		ancestors = lcons(plan, ancestors);
 
 		if (plan->groupingSets)
-			show_grouping_sets(outerPlanState(astate), plan, ancestors, es);
+			show_grouping_sets(astate, plan, ancestors, es);
 		else
+		{
 			show_sort_group_keys(outerPlanState(astate), "Group Key",
 								 plan->numCols, plan->grpColIdx,
 								 NULL, NULL, NULL,
 								 ancestors, es);
+			Assert(astate->num_hashes <= 1);
+			if (astate->num_hashes)
+				show_tuplehash_info(&astate->perhash[0].hashtable->instrument, es);
+		}
 
 		ancestors = list_delete_first(ancestors);
 	}
 }
 
 static void
-show_grouping_sets(PlanState *planstate, Agg *agg,
+show_grouping_sets(AggState *aggstate, Agg *agg,
 				   List *ancestors, ExplainState *es)
 {
+	PlanState	*planstate = outerPlanState(aggstate);
 	List	   *context;
 	bool		useprefix;
 	ListCell   *lc;
+	int			setno = 0;
 
 	/* Set up deparsing context */
 	context = set_deparse_context_plan(es->deparse_cxt,
@@ -2289,27 +2315,41 @@ show_grouping_sets(PlanState *planstate, Agg *agg,
 
 	ExplainOpenGroup("Grouping Sets", "Grouping Sets", false, es);
 
-	show_grouping_set_keys(planstate, agg, NULL,
-						   context, useprefix, ancestors, es);
+	show_grouping_set_info(aggstate, agg, NULL, context, useprefix, ancestors,
+			aggstate->num_hashes ?
+			&aggstate->perhash[setno++].hashtable->instrument : NULL,
+			es);
 
 	foreach(lc, agg->chain)
 	{
 		Agg		   *aggnode = lfirst(lc);
 		Sort	   *sortnode = (Sort *) aggnode->plan.lefttree;
+		HashTableInstrumentation *inst = NULL;
+
+		if (aggnode->aggstrategy == AGG_HASHED ||
+				aggnode->aggstrategy == AGG_MIXED)
+		{
+			Assert(setno < aggstate->num_hashes);
+			inst = &aggstate->perhash[setno++].hashtable->instrument;
+		}
 
-		show_grouping_set_keys(planstate, aggnode, sortnode,
-							   context, useprefix, ancestors, es);
+		show_grouping_set_info(aggstate, aggnode, sortnode,
+							   context, useprefix, ancestors,
+							   inst, es);
 	}
 
 	ExplainCloseGroup("Grouping Sets", "Grouping Sets", false, es);
 }
 
+/* Show keys and any hash instrumentation for a grouping set */
 static void
-show_grouping_set_keys(PlanState *planstate,
+show_grouping_set_info(AggState *aggstate,
 					   Agg *aggnode, Sort *sortnode,
 					   List *context, bool useprefix,
-					   List *ancestors, ExplainState *es)
+					   List *ancestors, HashTableInstrumentation *inst,
+					   ExplainState *es)
 {
+	PlanState	*planstate = outerPlanState(aggstate);
 	Plan	   *plan = planstate->plan;
 	char	   *exprstr;
 	ListCell   *lc;
@@ -2373,6 +2413,10 @@ show_grouping_set_keys(PlanState *planstate,
 
 	ExplainCloseGroup(keysetname, keysetname, false, es);
 
+	if (aggnode->aggstrategy == AGG_HASHED ||
+			aggnode->aggstrategy == AGG_MIXED)
+		show_tuplehash_info(inst, es);
+
 	if (sortnode && es->format == EXPLAIN_FORMAT_TEXT)
 		es->indent--;
 
@@ -2769,6 +2813,54 @@ show_hash_info(HashState *hashstate, ExplainState *es)
 	}
 }
 
+/*
+ * Show hash bucket stats and (optionally) memory.
+ */
+static void
+show_tuplehash_info(HashTableInstrumentation *inst, ExplainState *es)
+{
+	long	spacePeakKb_tuples = (inst->space_peak_tuples + 1023) / 1024;
+	long	spacePeakKb_hash = (inst->space_peak_hash + 1023) / 1024;
+
+	if (es->format != EXPLAIN_FORMAT_TEXT)
+	{
+		ExplainPropertyInteger("Hash Buckets", NULL,
+							   inst->nbuckets, es);
+		ExplainPropertyInteger("Original Hash Buckets", NULL,
+							   inst->nbuckets_original, es);
+		ExplainPropertyInteger("Peak Memory Usage (hashtable)", "kB",
+							   spacePeakKb_hash, es);
+		ExplainPropertyInteger("Peak Memory Usage (tuples)", "kB",
+							   spacePeakKb_tuples, es);
+	}
+	else if (!inst->nbuckets)
+		; /* Do nothing: the hash table was never built */
+	else
+	{
+		if (inst->nbuckets_original != inst->nbuckets)
+		{
+			ExplainIndentText(es);
+			appendStringInfo(es->str,
+						"Buckets: %ld (originally %ld)",
+						(long) inst->nbuckets,
+						(long) inst->nbuckets_original);
+		}
+		else
+		{
+			ExplainIndentText(es);
+			appendStringInfo(es->str,
+						"Buckets: %ld",
+						(long) inst->nbuckets);
+		}
+
+		if (es->analyze)
+			appendStringInfo(es->str,
+					"  Memory Usage: hashtable: %ldkB, tuples: %ldkB",
+					spacePeakKb_hash, spacePeakKb_tuples);
+		appendStringInfoChar(es->str, '\n');
+	}
+}
+
 /*
  * If it's EXPLAIN ANALYZE, show exact/lossy pages for a BitmapHeapScan node
  */
@@ -3436,6 +3528,29 @@ ExplainSubPlans(List *plans, List *ancestors,
 
 		ExplainNode(sps->planstate, ancestors,
 					relationship, sp->plan_name, es);
+		if (sps->hashtable)
+		{
+			ExplainOpenGroup("Hashtable", "Hashtable", true, es);
+			if (es->format == EXPLAIN_FORMAT_TEXT)
+			{
+				ExplainIndentText(es);
+				appendStringInfoString(es->str, "Hashtable: ");
+			}
+			show_tuplehash_info(&sps->hashtable->instrument, es);
+			ExplainCloseGroup("Hashtable", "Hashtable", true, es);
+		}
+
+		if (sps->hashnulls)
+		{
+			ExplainOpenGroup("Null Hashtable", "Null Hashtable", true, es);
+			if (es->format == EXPLAIN_FORMAT_TEXT)
+			{
+				ExplainIndentText(es);
+				appendStringInfoString(es->str, "Null Hashtable: ");
+			}
+			show_tuplehash_info(&sps->hashnulls->instrument, es);
+			ExplainCloseGroup("Null Hashtable", "Null Hashtable", true, es);
+		}
 
 		ancestors = list_delete_first(ancestors);
 	}
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 009d27b9a8..2f5258e1d2 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -188,6 +188,7 @@ BuildTupleHashTableExt(PlanState *parent,
 	hashtable->inputslot = NULL;
 	hashtable->in_hash_funcs = NULL;
 	hashtable->cur_eq_func = NULL;
+	memset(&hashtable->instrument, 0, sizeof(hashtable->instrument));
 
 	/*
 	 * If parallelism is in use, even if the master backend is performing the
@@ -203,6 +204,7 @@ BuildTupleHashTableExt(PlanState *parent,
 		hashtable->hash_iv = 0;
 
 	hashtable->hashtab = tuplehash_create(metacxt, nbuckets, hashtable);
+	UpdateTupleHashTableStats(hashtable, true);
 
 	/*
 	 * We copy the input tuple descriptor just for safety --- we assume all
@@ -281,9 +283,34 @@ BuildTupleHashTable(PlanState *parent,
 void
 ResetTupleHashTable(TupleHashTable hashtable)
 {
+	UpdateTupleHashTableStats(hashtable, false);
 	tuplehash_reset(hashtable->hashtab);
 }
 
+/* Update instrumentation stats */
+void
+UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial)
+{
+	hashtable->instrument.nbuckets = hashtable->hashtab->size;
+	if (initial)
+	{
+		hashtable->instrument.nbuckets_original = hashtable->hashtab->size;
+		hashtable->instrument.space_peak_hash = hashtable->hashtab->size *
+			sizeof(TupleHashEntryData);
+		hashtable->instrument.space_peak_tuples = 0;
+	}
+	else
+	{
+		/* hashtable->entrysize includes additionalsize */
+		hashtable->instrument.space_peak_hash =
+			Max(hashtable->instrument.space_peak_hash,
+				hashtable->hashtab->size * sizeof(TupleHashEntryData));
+		hashtable->instrument.space_peak_tuples =
+			Max(hashtable->instrument.space_peak_tuples,
+				hashtable->hashtab->members * hashtable->entrysize);
+	}
+}
+
 /*
  * Find or create a hashtable entry for the tuple group containing the
  * given tuple.  The tuple must be the same type as the hashtable entries.
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 13c21ffe9a..4b8525edb6 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1710,6 +1710,7 @@ agg_retrieve_direct(AggState *aggstate)
 				 */
 				initialize_phase(aggstate, 0);
 				aggstate->table_filled = true;
+				UpdateTupleHashTableStats(aggstate->perhash[0].hashtable, false);
 				ResetTupleHashIterator(aggstate->perhash[0].hashtable,
 									   &aggstate->perhash[0].hashiter);
 				select_current_set(aggstate, 0, true);
@@ -1907,6 +1908,13 @@ agg_retrieve_direct(AggState *aggstate)
 						}
 					}
 				}
+
+				if (aggstate->aggstrategy == AGG_MIXED &&
+						aggstate->current_phase == 1)
+				{
+					for (int i = 0; i < aggstate->num_hashes; i++)
+						UpdateTupleHashTableStats(aggstate->perhash[i].hashtable, false);
+				}
 			}
 
 			/*
@@ -1981,6 +1989,9 @@ agg_fill_hash_table(AggState *aggstate)
 	}
 
 	aggstate->table_filled = true;
+	for (int i = 0; i < aggstate->num_hashes; i++)
+		UpdateTupleHashTableStats(aggstate->perhash[i].hashtable, false);
+
 	/* Initialize to walk the first hash table */
 	select_current_set(aggstate, 0, true);
 	ResetTupleHashIterator(aggstate->perhash[0].hashtable,
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 620414a1ed..93272c28b1 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -156,6 +156,9 @@ ExecRecursiveUnion(PlanState *pstate)
 		return slot;
 	}
 
+	if (node->hashtable)
+		UpdateTupleHashTableStats(node->hashtable, false);
+
 	return NULL;
 }
 
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index bfd148a41a..9c0e0ab96e 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -415,6 +415,7 @@ setop_fill_hash_table(SetOpState *setopstate)
 
 	setopstate->table_filled = true;
 	/* Initialize to walk the hash table */
+	UpdateTupleHashTableStats(setopstate->hashtable, false);
 	ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
 }
 
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 298b7757f5..22c32612ba 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -621,6 +621,9 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	ExecClearTuple(node->projRight->pi_state.resultslot);
 
 	MemoryContextSwitchTo(oldcontext);
+	UpdateTupleHashTableStats(node->hashtable, false);
+	if (node->hashnulls)
+		UpdateTupleHashTableStats(node->hashnulls, false);
 }
 
 /*
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 81fdfa4add..34199b57e6 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -150,6 +150,7 @@ extern TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable,
 										 ExprState *eqcomp,
 										 FmgrInfo *hashfunctions);
 extern void ResetTupleHashTable(TupleHashTable hashtable);
+extern void UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial);
 
 /*
  * prototypes from functions in execJunk.c
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index cd3ddf781f..cfeada5f1d 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -691,6 +691,14 @@ typedef struct TupleHashEntryData
 #define SH_DECLARE
 #include "lib/simplehash.h"
 
+typedef struct HashTableInstrumentation
+{
+	size_t	nbuckets;				/* number of buckets at end of execution */
+	size_t	nbuckets_original;		/* planned number of buckets */
+	size_t	space_peak_hash;	/* peak bucket-array memory, in bytes */
+	size_t	space_peak_tuples;	/* peak stored-tuple memory, in bytes */
+} HashTableInstrumentation;
+
 typedef struct TupleHashTableData
 {
 	tuplehash_hash *hashtab;	/* underlying hash table */
@@ -709,6 +717,7 @@ typedef struct TupleHashTableData
 	ExprState  *cur_eq_func;	/* comparator for input vs. table */
 	uint32		hash_iv;		/* hash-function IV */
 	ExprContext *exprcontext;	/* expression context */
+	HashTableInstrumentation instrument;
 }			TupleHashTableData;
 
 typedef tuplehash_iterator TupleHashIterator;
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index f457b5b150..b173b32cab 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -517,10 +517,11 @@ order by 1, 2;
          ->  HashAggregate
                Output: s2.s2, sum((s1.s1 + s2.s2))
                Group Key: s2.s2
+               Buckets: 4
                ->  Function Scan on pg_catalog.generate_series s2
                      Output: s2.s2
                      Function Call: generate_series(1, 3)
-(14 rows)
+(15 rows)
 
 select s1, s2, sm
 from generate_series(1, 3) s1,
@@ -556,10 +557,11 @@ select array(select sum(x+y) s
            ->  HashAggregate
                  Output: sum((x.x + y.y)), y.y
                  Group Key: y.y
+                 Buckets: 4
                  ->  Function Scan on pg_catalog.generate_series y
                        Output: y.y
                        Function Call: generate_series(1, 3)
-(13 rows)
+(14 rows)
 
 select array(select sum(x+y) s
             from generate_series(1,3) y group by y order by s)
@@ -872,12 +874,13 @@ explain (costs off)
 ---------------------------------------------------------------------
  HashAggregate
    Group Key: $0
+   Buckets: 2
    InitPlan 1 (returns $0)
      ->  Limit
            ->  Index Only Scan Backward using tenk1_unique2 on tenk1
                  Index Cond: (unique2 IS NOT NULL)
    ->  Result
-(7 rows)
+(8 rows)
 
 select distinct max(unique2) from tenk1;
  max  
@@ -1096,8 +1099,9 @@ explain (costs off) select * from t1 group by a,b,c,d;
 ----------------------
  HashAggregate
    Group Key: a, b
+   Buckets: 256
    ->  Seq Scan on t1
-(3 rows)
+(4 rows)
 
 -- No removal can happen if the complete PK is not present in GROUP BY
 explain (costs off) select a,c from t1 group by a,c,d;
@@ -1105,8 +1109,9 @@ explain (costs off) select a,c from t1 group by a,c,d;
 ----------------------
  HashAggregate
    Group Key: a, c, d
+   Buckets: 256
    ->  Seq Scan on t1
-(3 rows)
+(4 rows)
 
 -- Test removal across multiple relations
 explain (costs off) select *
@@ -1116,12 +1121,13 @@ group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.y,t2.z;
 ------------------------------------------------------
  HashAggregate
    Group Key: t1.a, t1.b, t2.x, t2.y
+   Buckets: 128
    ->  Hash Join
          Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b))
          ->  Seq Scan on t2
          ->  Hash
                ->  Seq Scan on t1
-(7 rows)
+(8 rows)
 
 -- Test case where t1 can be optimized but not t2
 explain (costs off) select t1.*,t2.x,t2.z
@@ -1131,12 +1137,13 @@ group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.z;
 ------------------------------------------------------
  HashAggregate
    Group Key: t1.a, t1.b, t2.x, t2.z
+   Buckets: 128
    ->  Hash Join
          Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b))
          ->  Seq Scan on t2
          ->  Hash
                ->  Seq Scan on t1
-(7 rows)
+(8 rows)
 
 -- Cannot optimize when PK is deferrable
 explain (costs off) select * from t3 group by a,b,c;
@@ -1144,8 +1151,9 @@ explain (costs off) select * from t3 group by a,b,c;
 ----------------------
  HashAggregate
    Group Key: a, b, c
+   Buckets: 256
    ->  Seq Scan on t3
-(3 rows)
+(4 rows)
 
 create temp table t1c () inherits (t1);
 -- Ensure we don't remove any columns when t1 has a child table
@@ -1154,10 +1162,11 @@ explain (costs off) select * from t1 group by a,b,c,d;
 -------------------------------------
  HashAggregate
    Group Key: t1.a, t1.b, t1.c, t1.d
+   Buckets: 256
    ->  Append
          ->  Seq Scan on t1 t1_1
          ->  Seq Scan on t1c t1_2
-(5 rows)
+(6 rows)
 
 -- Okay to remove columns if we're only querying the parent.
 explain (costs off) select * from only t1 group by a,b,c,d;
@@ -1165,8 +1174,9 @@ explain (costs off) select * from only t1 group by a,b,c,d;
 ----------------------
  HashAggregate
    Group Key: a, b
+   Buckets: 2
    ->  Seq Scan on t1
-(3 rows)
+(4 rows)
 
 create temp table p_t1 (
   a int,
@@ -1183,10 +1193,11 @@ explain (costs off) select * from p_t1 group by a,b,c,d;
 --------------------------------
  HashAggregate
    Group Key: p_t1.a, p_t1.b
+   Buckets: 512
    ->  Append
          ->  Seq Scan on p_t1_1
          ->  Seq Scan on p_t1_2
-(5 rows)
+(6 rows)
 
 drop table t1 cascade;
 NOTICE:  drop cascades to table t1c
@@ -2354,6 +2365,7 @@ explain (costs off)
    ->  Hash
          ->  HashAggregate
                Group Key: onek.twothousand, onek.twothousand
+               Buckets: 256
                ->  Seq Scan on onek
-(8 rows)
+(9 rows)
 
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index c1f802c88a..be386731ce 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -974,9 +974,11 @@ explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
    Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
    ->  HashAggregate
          Hash Key: "*VALUES*".column1
+         Buckets: 16
          Hash Key: "*VALUES*".column2
+         Buckets: 16
          ->  Values Scan on "*VALUES*"
-(6 rows)
+(8 rows)
 
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by cube(a,b) order by 3,1,2;
@@ -1008,11 +1010,14 @@ explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
    Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
    ->  MixedAggregate
          Hash Key: "*VALUES*".column1, "*VALUES*".column2
+         Buckets: 16
          Hash Key: "*VALUES*".column1
+         Buckets: 16
          Hash Key: "*VALUES*".column2
+         Buckets: 16
          Group Key: ()
          ->  Values Scan on "*VALUES*"
-(8 rows)
+(11 rows)
 
 -- shouldn't try and hash
 explain (costs off)
@@ -1071,11 +1076,12 @@ explain (costs off)
    Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v))
    ->  MixedAggregate
          Hash Key: unsortable_col
+         Buckets: 256
          Group Key: unhashable_col
          ->  Sort
                Sort Key: unhashable_col
                ->  Seq Scan on gstest4
-(8 rows)
+(9 rows)
 
 select unhashable_col, unsortable_col,
        grouping(unhashable_col, unsortable_col),
@@ -1114,11 +1120,12 @@ explain (costs off)
    Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v))
    ->  MixedAggregate
          Hash Key: v, unsortable_col
+         Buckets: 256
          Group Key: v, unhashable_col
          ->  Sort
                Sort Key: v, unhashable_col
                ->  Seq Scan on gstest4
-(8 rows)
+(9 rows)
 
 -- empty input: first is 0 rows, second 1, third 3 etc.
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
@@ -1132,9 +1139,11 @@ explain (costs off)
 --------------------------------
  HashAggregate
    Hash Key: a, b
+   Buckets: 256
    Hash Key: a
+   Buckets: 256
    ->  Seq Scan on gstest_empty
-(4 rows)
+(6 rows)
 
 select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),());
  a | b | sum | count 
@@ -1156,11 +1165,12 @@ explain (costs off)
 --------------------------------
  MixedAggregate
    Hash Key: a, b
+   Buckets: 256
    Group Key: ()
    Group Key: ()
    Group Key: ()
    ->  Seq Scan on gstest_empty
-(6 rows)
+(7 rows)
 
 select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
  sum | count 
@@ -1201,9 +1211,11 @@ explain (costs off)
 ---------------------------
  HashAggregate
    Hash Key: a, b
+   Buckets: 2
    Hash Key: a, c
+   Buckets: 2
    ->  Seq Scan on gstest3
-(4 rows)
+(6 rows)
 
 -- simple rescan tests
 select a, b, sum(v.x)
@@ -1230,11 +1242,13 @@ explain (costs off)
    Sort Key: (sum("*VALUES*".column1)), gstest_data.a, gstest_data.b
    ->  HashAggregate
          Hash Key: gstest_data.a
+         Buckets: 256
          Hash Key: gstest_data.b
+         Buckets: 256
          ->  Nested Loop
                ->  Values Scan on "*VALUES*"
                ->  Function Scan on gstest_data
-(8 rows)
+(10 rows)
 
 select *
   from (values (1),(2)) v(x),
@@ -1286,10 +1300,13 @@ explain (costs off)
    Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), (max("*VALUES*".column3))
    ->  HashAggregate
          Hash Key: "*VALUES*".column1, "*VALUES*".column2
+         Buckets: 16
          Hash Key: ("*VALUES*".column1 + 1), ("*VALUES*".column2 + 1)
+         Buckets: 16
          Hash Key: ("*VALUES*".column1 + 2), ("*VALUES*".column2 + 2)
+         Buckets: 16
          ->  Values Scan on "*VALUES*"
-(7 rows)
+(10 rows)
 
 select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
   from gstest2 group by cube (a,b) order by rsum, a, b;
@@ -1317,11 +1334,14 @@ explain (costs off)
                Sort Key: a, b
                ->  MixedAggregate
                      Hash Key: a, b
+                     Buckets: 256
                      Hash Key: a
+                     Buckets: 256
                      Hash Key: b
+                     Buckets: 256
                      Group Key: ()
                      ->  Seq Scan on gstest2
-(11 rows)
+(14 rows)
 
 select a, b, sum(v.x)
   from (values (1),(2)) v(x), gstest_data(v.x)
@@ -1352,13 +1372,16 @@ explain (costs off)
    Sort Key: gstest_data.a, gstest_data.b
    ->  MixedAggregate
          Hash Key: gstest_data.a, gstest_data.b
+         Buckets: 256
          Hash Key: gstest_data.a
+         Buckets: 256
          Hash Key: gstest_data.b
+         Buckets: 256
          Group Key: ()
          ->  Nested Loop
                ->  Values Scan on "*VALUES*"
                ->  Function Scan on gstest_data
-(10 rows)
+(13 rows)
 
 -- Verify that we correctly handle the child node returning a
 -- non-minimal slot, which happens if the input is pre-sorted,
@@ -1553,9 +1576,13 @@ explain (costs off)
 -------------------------------
  MixedAggregate
    Hash Key: two
+   Buckets: 2
    Hash Key: four
+   Buckets: 4
    Hash Key: ten
+   Buckets: 16
    Hash Key: hundred
+   Buckets: 128
    Group Key: unique1
    Sort Key: twothousand
      Group Key: twothousand
@@ -1564,7 +1591,7 @@ explain (costs off)
    ->  Sort
          Sort Key: unique1
          ->  Seq Scan on tenk1
-(13 rows)
+(17 rows)
 
 explain (costs off)
   select unique1,
@@ -1576,14 +1603,18 @@ explain (costs off)
 -------------------------------
  MixedAggregate
    Hash Key: two
+   Buckets: 2
    Hash Key: four
+   Buckets: 4
    Hash Key: ten
+   Buckets: 16
    Hash Key: hundred
+   Buckets: 128
    Group Key: unique1
    ->  Sort
          Sort Key: unique1
          ->  Seq Scan on tenk1
-(9 rows)
+(13 rows)
 
 set work_mem = '384kB';
 explain (costs off)
@@ -1596,17 +1627,22 @@ explain (costs off)
 -------------------------------
  MixedAggregate
    Hash Key: two
+   Buckets: 2
    Hash Key: four
+   Buckets: 4
    Hash Key: ten
+   Buckets: 16
    Hash Key: hundred
+   Buckets: 128
    Hash Key: thousand
+   Buckets: 2048
    Group Key: unique1
    Sort Key: twothousand
      Group Key: twothousand
    ->  Sort
          Sort Key: unique1
          ->  Seq Scan on tenk1
-(12 rows)
+(17 rows)
 
 -- check collation-sensitive matching between grouping expressions
 -- (similar to a check for aggregates, but there are additional code
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 761376b007..9f07501369 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -6199,6 +6199,7 @@ where exists (select 1 from tenk1 t3
          ->  HashAggregate
                Output: t3.thousand, t3.tenthous
                Group Key: t3.thousand, t3.tenthous
+               Buckets: 16384
                ->  Index Only Scan using tenk1_thous_tenthous on public.tenk1 t3
                      Output: t3.thousand, t3.tenthous
          ->  Hash
@@ -6209,7 +6210,7 @@ where exists (select 1 from tenk1 t3
    ->  Index Only Scan using tenk1_hundred on public.tenk1 t2
          Output: t2.hundred
          Index Cond: (t2.hundred = t3.tenthous)
-(18 rows)
+(19 rows)
 
 -- ... unless it actually is unique
 create table j3 as select unique1, tenthous from onek;
diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out
index d0121a7b0b..ca8573ac6d 100644
--- a/src/test/regress/expected/matview.out
+++ b/src/test/regress/expected/matview.out
@@ -23,8 +23,9 @@ EXPLAIN (costs off)
 ----------------------------
  HashAggregate
    Group Key: type
+   Buckets: 256
    ->  Seq Scan on mvtest_t
-(3 rows)
+(4 rows)
 
 CREATE MATERIALIZED VIEW mvtest_tm AS SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type WITH NO DATA;
 SELECT relispopulated FROM pg_class WHERE oid = 'mvtest_tm'::regclass;
@@ -61,8 +62,9 @@ EXPLAIN (costs off)
    Sort Key: mvtest_t.type
    ->  HashAggregate
          Group Key: mvtest_t.type
+         Buckets: 256
          ->  Seq Scan on mvtest_t
-(5 rows)
+(6 rows)
 
 CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type;
 SELECT * FROM mvtest_tvm;
@@ -85,8 +87,9 @@ EXPLAIN (costs off)
  Aggregate
    ->  HashAggregate
          Group Key: mvtest_t.type
+         Buckets: 256
          ->  Seq Scan on mvtest_t
-(4 rows)
+(5 rows)
 
 CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv;
 CREATE VIEW mvtest_tvvmv AS SELECT * FROM mvtest_tvvm;
diff --git a/src/test/regress/expected/partition_aggregate.out b/src/test/regress/expected/partition_aggregate.out
index fbc8d3ac6c..5939c2a128 100644
--- a/src/test/regress/expected/partition_aggregate.out
+++ b/src/test/regress/expected/partition_aggregate.out
@@ -27,17 +27,20 @@ SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVI
    ->  Append
          ->  HashAggregate
                Group Key: pagg_tab.c
+               Buckets: 4
                Filter: (avg(pagg_tab.d) < '15'::numeric)
                ->  Seq Scan on pagg_tab_p1 pagg_tab
          ->  HashAggregate
                Group Key: pagg_tab_1.c
+               Buckets: 4
                Filter: (avg(pagg_tab_1.d) < '15'::numeric)
                ->  Seq Scan on pagg_tab_p2 pagg_tab_1
          ->  HashAggregate
                Group Key: pagg_tab_2.c
+               Buckets: 4
                Filter: (avg(pagg_tab_2.d) < '15'::numeric)
                ->  Seq Scan on pagg_tab_p3 pagg_tab_2
-(15 rows)
+(18 rows)
 
 SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3;
   c   | sum  |         avg         | count | min | max 
@@ -59,18 +62,22 @@ SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVI
    Sort Key: pagg_tab.a, (sum(pagg_tab.b)), (avg(pagg_tab.b))
    ->  Finalize HashAggregate
          Group Key: pagg_tab.a
+         Buckets: 32
          Filter: (avg(pagg_tab.d) < '15'::numeric)
          ->  Append
                ->  Partial HashAggregate
                      Group Key: pagg_tab.a
+                     Buckets: 32
                      ->  Seq Scan on pagg_tab_p1 pagg_tab
                ->  Partial HashAggregate
                      Group Key: pagg_tab_1.a
+                     Buckets: 32
                      ->  Seq Scan on pagg_tab_p2 pagg_tab_1
                ->  Partial HashAggregate
                      Group Key: pagg_tab_2.a
+                     Buckets: 32
                      ->  Seq Scan on pagg_tab_p3 pagg_tab_2
-(15 rows)
+(19 rows)
 
 SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3;
  a  | sum  |         avg         | count | min | max 
@@ -95,14 +102,17 @@ SELECT a, c, count(*) FROM pagg_tab GROUP BY a, c;
  Append
    ->  HashAggregate
          Group Key: pagg_tab.a, pagg_tab.c
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p1 pagg_tab
    ->  HashAggregate
          Group Key: pagg_tab_1.a, pagg_tab_1.c
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p2 pagg_tab_1
    ->  HashAggregate
          Group Key: pagg_tab_2.a, pagg_tab_2.c
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p3 pagg_tab_2
-(10 rows)
+(13 rows)
 
 -- Check with multiple columns in GROUP BY, order in GROUP BY is reversed
 EXPLAIN (COSTS OFF)
@@ -112,14 +122,17 @@ SELECT a, c, count(*) FROM pagg_tab GROUP BY c, a;
  Append
    ->  HashAggregate
          Group Key: pagg_tab.c, pagg_tab.a
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p1 pagg_tab
    ->  HashAggregate
          Group Key: pagg_tab_1.c, pagg_tab_1.a
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p2 pagg_tab_1
    ->  HashAggregate
          Group Key: pagg_tab_2.c, pagg_tab_2.a
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p3 pagg_tab_2
-(10 rows)
+(13 rows)
 
 -- Check with multiple columns in GROUP BY, order in target-list is reversed
 EXPLAIN (COSTS OFF)
@@ -129,14 +142,17 @@ SELECT c, a, count(*) FROM pagg_tab GROUP BY a, c;
  Append
    ->  HashAggregate
          Group Key: pagg_tab.a, pagg_tab.c
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p1 pagg_tab
    ->  HashAggregate
          Group Key: pagg_tab_1.a, pagg_tab_1.c
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p2 pagg_tab_1
    ->  HashAggregate
          Group Key: pagg_tab_2.a, pagg_tab_2.c
+         Buckets: 128
          ->  Seq Scan on pagg_tab_p3 pagg_tab_2
-(10 rows)
+(13 rows)
 
 -- Test when input relation for grouping is dummy
 EXPLAIN (COSTS OFF)
@@ -145,9 +161,10 @@ SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c;
 --------------------------------
  HashAggregate
    Group Key: c
+   Buckets: 2
    ->  Result
          One-Time Filter: false
-(4 rows)
+(5 rows)
 
 SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c;
  c | sum 
@@ -341,12 +358,13 @@ SELECT c, sum(a) FROM pagg_tab GROUP BY rollup(c) ORDER BY 1, 2;
    Sort Key: pagg_tab.c, (sum(pagg_tab.a))
    ->  MixedAggregate
          Hash Key: pagg_tab.c
+         Buckets: 16
          Group Key: ()
          ->  Append
                ->  Seq Scan on pagg_tab_p1 pagg_tab_1
                ->  Seq Scan on pagg_tab_p2 pagg_tab_2
                ->  Seq Scan on pagg_tab_p3 pagg_tab_3
-(9 rows)
+(10 rows)
 
 -- ORDERED SET within the aggregate.
 -- Full aggregation; since all the rows that belong to the same group come
@@ -418,6 +436,7 @@ SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2
    ->  Append
          ->  HashAggregate
                Group Key: t1.x
+               Buckets: 8
                ->  Hash Join
                      Hash Cond: (t1.x = t2.y)
                      ->  Seq Scan on pagg_tab1_p1 t1
@@ -425,6 +444,7 @@ SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2
                            ->  Seq Scan on pagg_tab2_p1 t2
          ->  HashAggregate
                Group Key: t1_1.x
+               Buckets: 8
                ->  Hash Join
                      Hash Cond: (t1_1.x = t2_1.y)
                      ->  Seq Scan on pagg_tab1_p2 t1_1
@@ -432,12 +452,13 @@ SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2
                            ->  Seq Scan on pagg_tab2_p2 t2_1
          ->  HashAggregate
                Group Key: t1_2.x
+               Buckets: 8
                ->  Hash Join
                      Hash Cond: (t2_2.y = t1_2.x)
                      ->  Seq Scan on pagg_tab2_p3 t2_2
                      ->  Hash
                            ->  Seq Scan on pagg_tab1_p3 t1_2
-(24 rows)
+(27 rows)
 
 SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3;
  x  | sum  | count 
@@ -458,6 +479,7 @@ SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t
    Sort Key: t1.x, (sum(t1.y)), (count(((t1.*)::pagg_tab1)))
    ->  HashAggregate
          Group Key: t1.x
+         Buckets: 16
          ->  Hash Join
                Hash Cond: (t1.x = t2.y)
                ->  Append
@@ -469,7 +491,7 @@ SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t
                            ->  Seq Scan on pagg_tab2_p1 t2_1
                            ->  Seq Scan on pagg_tab2_p2 t2_2
                            ->  Seq Scan on pagg_tab2_p3 t2_3
-(15 rows)
+(16 rows)
 
 SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3;
  x  | sum  | count 
@@ -491,6 +513,7 @@ SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2
    ->  Append
          ->  HashAggregate
                Group Key: t2.y
+               Buckets: 4
                ->  Hash Join
                      Hash Cond: (t1.x = t2.y)
                      ->  Seq Scan on pagg_tab1_p1 t1
@@ -498,6 +521,7 @@ SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2
                            ->  Seq Scan on pagg_tab2_p1 t2
          ->  HashAggregate
                Group Key: t2_1.y
+               Buckets: 4
                ->  Hash Join
                      Hash Cond: (t1_1.x = t2_1.y)
                      ->  Seq Scan on pagg_tab1_p2 t1_1
@@ -505,12 +529,13 @@ SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2
                            ->  Seq Scan on pagg_tab2_p2 t2_1
          ->  HashAggregate
                Group Key: t2_2.y
+               Buckets: 4
                ->  Hash Join
                      Hash Cond: (t2_2.y = t1_2.x)
                      ->  Seq Scan on pagg_tab2_p3 t2_2
                      ->  Hash
                            ->  Seq Scan on pagg_tab1_p3 t1_2
-(24 rows)
+(27 rows)
 
 -- When GROUP BY clause does not match; partial aggregation is performed for each partition.
 -- Also test GroupAggregate paths by disabling hash aggregates.
@@ -582,6 +607,7 @@ SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP B
          ->  Append
                ->  Partial HashAggregate
                      Group Key: b.y
+                     Buckets: 4
                      ->  Hash Left Join
                            Hash Cond: (a.x = b.y)
                            ->  Seq Scan on pagg_tab1_p1 a
@@ -589,6 +615,7 @@ SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP B
                                  ->  Seq Scan on pagg_tab2_p1 b
                ->  Partial HashAggregate
                      Group Key: b_1.y
+                     Buckets: 4
                      ->  Hash Left Join
                            Hash Cond: (a_1.x = b_1.y)
                            ->  Seq Scan on pagg_tab1_p2 a_1
@@ -596,12 +623,13 @@ SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP B
                                  ->  Seq Scan on pagg_tab2_p2 b_1
                ->  Partial HashAggregate
                      Group Key: b_2.y
+                     Buckets: 4
                      ->  Hash Right Join
                            Hash Cond: (b_2.y = a_2.x)
                            ->  Seq Scan on pagg_tab2_p3 b_2
                            ->  Hash
                                  ->  Seq Scan on pagg_tab1_p3 a_2
-(26 rows)
+(29 rows)
 
 SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST;
  y  | sum  
@@ -625,6 +653,7 @@ SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP
    ->  Append
          ->  HashAggregate
                Group Key: b.y
+               Buckets: 4
                ->  Hash Right Join
                      Hash Cond: (a.x = b.y)
                      ->  Seq Scan on pagg_tab1_p1 a
@@ -632,6 +661,7 @@ SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP
                            ->  Seq Scan on pagg_tab2_p1 b
          ->  HashAggregate
                Group Key: b_1.y
+               Buckets: 4
                ->  Hash Right Join
                      Hash Cond: (a_1.x = b_1.y)
                      ->  Seq Scan on pagg_tab1_p2 a_1
@@ -639,12 +669,13 @@ SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP
                            ->  Seq Scan on pagg_tab2_p2 b_1
          ->  HashAggregate
                Group Key: b_2.y
+               Buckets: 4
                ->  Hash Left Join
                      Hash Cond: (b_2.y = a_2.x)
                      ->  Seq Scan on pagg_tab2_p3 b_2
                      ->  Hash
                            ->  Seq Scan on pagg_tab1_p3 a_2
-(24 rows)
+(27 rows)
 
 SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST;
  y  | sum  
@@ -674,6 +705,7 @@ SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y G
          ->  Append
                ->  Partial HashAggregate
                      Group Key: a.x
+                     Buckets: 8
                      ->  Hash Full Join
                            Hash Cond: (a.x = b.y)
                            ->  Seq Scan on pagg_tab1_p1 a
@@ -681,6 +713,7 @@ SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y G
                                  ->  Seq Scan on pagg_tab2_p1 b
                ->  Partial HashAggregate
                      Group Key: a_1.x
+                     Buckets: 8
                      ->  Hash Full Join
                            Hash Cond: (a_1.x = b_1.y)
                            ->  Seq Scan on pagg_tab1_p2 a_1
@@ -688,12 +721,13 @@ SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y G
                                  ->  Seq Scan on pagg_tab2_p2 b_1
                ->  Partial HashAggregate
                      Group Key: a_2.x
+                     Buckets: 8
                      ->  Hash Full Join
                            Hash Cond: (b_2.y = a_2.x)
                            ->  Seq Scan on pagg_tab2_p3 b_2
                            ->  Hash
                                  ->  Seq Scan on pagg_tab1_p3 a_2
-(26 rows)
+(29 rows)
 
 SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST;
  x  | sum  
@@ -728,6 +762,7 @@ SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOI
    Sort Key: pagg_tab1.x, pagg_tab2.y
    ->  HashAggregate
          Group Key: pagg_tab1.x, pagg_tab2.y
+         Buckets: 256
          ->  Hash Left Join
                Hash Cond: (pagg_tab1.x = pagg_tab2.y)
                Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20))
@@ -742,7 +777,7 @@ SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOI
                                  Filter: (y > 10)
                            ->  Seq Scan on pagg_tab2_p3 pagg_tab2_2
                                  Filter: (y > 10)
-(18 rows)
+(19 rows)
 
 SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20  GROUP BY a.x, b.y ORDER BY 1, 2;
  x  | y  | count 
@@ -768,6 +803,7 @@ SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOI
    Sort Key: pagg_tab1.x, pagg_tab2.y
    ->  HashAggregate
          Group Key: pagg_tab1.x, pagg_tab2.y
+         Buckets: 256
          ->  Hash Full Join
                Hash Cond: (pagg_tab1.x = pagg_tab2.y)
                Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20))
@@ -782,7 +818,7 @@ SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOI
                                  Filter: (y > 10)
                            ->  Seq Scan on pagg_tab2_p3 pagg_tab2_2
                                  Filter: (y > 10)
-(18 rows)
+(19 rows)
 
 SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2;
  x  | y  | count 
@@ -831,18 +867,22 @@ SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22
    Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c))
    ->  Finalize HashAggregate
          Group Key: pagg_tab_m.a
+         Buckets: 64
          Filter: (avg(pagg_tab_m.c) < '22'::numeric)
          ->  Append
                ->  Partial HashAggregate
                      Group Key: pagg_tab_m.a
+                     Buckets: 16
                      ->  Seq Scan on pagg_tab_m_p1 pagg_tab_m
                ->  Partial HashAggregate
                      Group Key: pagg_tab_m_1.a
+                     Buckets: 16
                      ->  Seq Scan on pagg_tab_m_p2 pagg_tab_m_1
                ->  Partial HashAggregate
                      Group Key: pagg_tab_m_2.a
+                     Buckets: 16
                      ->  Seq Scan on pagg_tab_m_p3 pagg_tab_m_2
-(15 rows)
+(19 rows)
 
 SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3;
  a  | sum  |         avg         | count 
@@ -865,17 +905,20 @@ SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING su
    ->  Append
          ->  HashAggregate
                Group Key: pagg_tab_m.a, ((pagg_tab_m.a + pagg_tab_m.b) / 2)
+               Buckets: 128
                Filter: (sum(pagg_tab_m.b) < 50)
                ->  Seq Scan on pagg_tab_m_p1 pagg_tab_m
          ->  HashAggregate
                Group Key: pagg_tab_m_1.a, ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2)
+               Buckets: 128
                Filter: (sum(pagg_tab_m_1.b) < 50)
                ->  Seq Scan on pagg_tab_m_p2 pagg_tab_m_1
          ->  HashAggregate
                Group Key: pagg_tab_m_2.a, ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2)
+               Buckets: 128
                Filter: (sum(pagg_tab_m_2.b) < 50)
                ->  Seq Scan on pagg_tab_m_p3 pagg_tab_m_2
-(15 rows)
+(18 rows)
 
 SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3;
  a  | sum |         avg         | count 
@@ -898,17 +941,20 @@ SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAV
    ->  Append
          ->  HashAggregate
                Group Key: ((pagg_tab_m.a + pagg_tab_m.b) / 2), pagg_tab_m.c, pagg_tab_m.a
+               Buckets: 128
                Filter: ((sum(pagg_tab_m.b) = 50) AND (avg(pagg_tab_m.c) > '25'::numeric))
                ->  Seq Scan on pagg_tab_m_p1 pagg_tab_m
          ->  HashAggregate
                Group Key: ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2), pagg_tab_m_1.c, pagg_tab_m_1.a
+               Buckets: 128
                Filter: ((sum(pagg_tab_m_1.b) = 50) AND (avg(pagg_tab_m_1.c) > '25'::numeric))
                ->  Seq Scan on pagg_tab_m_p2 pagg_tab_m_1
          ->  HashAggregate
                Group Key: ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2), pagg_tab_m_2.c, pagg_tab_m_2.a
+               Buckets: 128
                Filter: ((sum(pagg_tab_m_2.b) = 50) AND (avg(pagg_tab_m_2.c) > '25'::numeric))
                ->  Seq Scan on pagg_tab_m_p3 pagg_tab_m_2
-(15 rows)
+(18 rows)
 
 SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3;
  a  | c  | sum |         avg         | count 
@@ -1032,6 +1078,7 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER B
    ->  Append
          ->  HashAggregate
                Group Key: pagg_tab_ml.a
+               Buckets: 16
                Filter: (avg(pagg_tab_ml.b) < '3'::numeric)
                ->  Seq Scan on pagg_tab_ml_p1 pagg_tab_ml
          ->  Finalize GroupAggregate
@@ -1042,9 +1089,11 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER B
                      ->  Append
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_2.a
+                                 Buckets: 16
                                  ->  Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_3.a
+                                 Buckets: 16
                                  ->  Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3
          ->  Finalize GroupAggregate
                Group Key: pagg_tab_ml_5.a
@@ -1054,11 +1103,13 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER B
                      ->  Append
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_5.a
+                                 Buckets: 8
                                  ->  Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_6.a
+                                 Buckets: 8
                                  ->  Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6
-(31 rows)
+(36 rows)
 
 SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3;
  a  | sum  | count 
@@ -1089,20 +1140,25 @@ SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3;
                ->  Append
                      ->  Partial HashAggregate
                            Group Key: pagg_tab_ml.b
+                           Buckets: 16
                            ->  Seq Scan on pagg_tab_ml_p1 pagg_tab_ml
                      ->  Partial HashAggregate
                            Group Key: pagg_tab_ml_1.b
+                           Buckets: 16
                            ->  Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1
                      ->  Partial HashAggregate
                            Group Key: pagg_tab_ml_2.b
+                           Buckets: 16
                            ->  Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2
                      ->  Partial HashAggregate
                            Group Key: pagg_tab_ml_3.b
+                           Buckets: 8
                            ->  Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3
                      ->  Partial HashAggregate
                            Group Key: pagg_tab_ml_4.b
+                           Buckets: 8
                            ->  Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4
-(22 rows)
+(27 rows)
 
 SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3;
  b |  sum  | count 
@@ -1124,25 +1180,30 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 O
    ->  Append
          ->  HashAggregate
                Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c
+               Buckets: 512
                Filter: (avg(pagg_tab_ml.b) > '7'::numeric)
                ->  Seq Scan on pagg_tab_ml_p1 pagg_tab_ml
          ->  HashAggregate
                Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c
+               Buckets: 256
                Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric)
                ->  Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1
          ->  HashAggregate
                Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c
+               Buckets: 256
                Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric)
                ->  Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2
          ->  HashAggregate
                Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c
+               Buckets: 128
                Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric)
                ->  Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3
          ->  HashAggregate
                Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c
+               Buckets: 128
                Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric)
                ->  Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4
-(23 rows)
+(28 rows)
 
 SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3;
  a  | sum  | count 
@@ -1183,6 +1244,7 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER B
                            Sort Key: pagg_tab_ml.a
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml.a
+                                 Buckets: 16
                                  ->  Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml
          ->  Finalize GroupAggregate
                Group Key: pagg_tab_ml_2.a
@@ -1194,9 +1256,11 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER B
                            ->  Parallel Append
                                  ->  Partial HashAggregate
                                        Group Key: pagg_tab_ml_2.a
+                                       Buckets: 16
                                        ->  Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2
                                  ->  Partial HashAggregate
                                        Group Key: pagg_tab_ml_3.a
+                                       Buckets: 16
                                        ->  Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3
          ->  Finalize GroupAggregate
                Group Key: pagg_tab_ml_5.a
@@ -1208,11 +1272,13 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER B
                            ->  Parallel Append
                                  ->  Partial HashAggregate
                                        Group Key: pagg_tab_ml_5.a
+                                       Buckets: 8
                                        ->  Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5
                                  ->  Partial HashAggregate
                                        Group Key: pagg_tab_ml_6.a
+                                       Buckets: 8
                                        ->  Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6
-(41 rows)
+(46 rows)
 
 SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3;
  a  | sum  | count 
@@ -1245,20 +1311,25 @@ SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3;
                      ->  Parallel Append
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml.b
+                                 Buckets: 16
                                  ->  Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_1.b
+                                 Buckets: 16
                                  ->  Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_2.b
+                                 Buckets: 16
                                  ->  Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_3.b
+                                 Buckets: 8
                                  ->  Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_ml_4.b
+                                 Buckets: 8
                                  ->  Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4
-(24 rows)
+(29 rows)
 
 SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3;
  b |  sum  | count 
@@ -1282,25 +1353,30 @@ SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 O
          ->  Parallel Append
                ->  HashAggregate
                      Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c
+                     Buckets: 512
                      Filter: (avg(pagg_tab_ml.b) > '7'::numeric)
                      ->  Seq Scan on pagg_tab_ml_p1 pagg_tab_ml
                ->  HashAggregate
                      Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c
+                     Buckets: 256
                      Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric)
                      ->  Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1
                ->  HashAggregate
                      Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c
+                     Buckets: 256
                      Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric)
                      ->  Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2
                ->  HashAggregate
                      Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c
+                     Buckets: 128
                      Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric)
                      ->  Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3
                ->  HashAggregate
                      Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c
+                     Buckets: 128
                      Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric)
                      ->  Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4
-(25 rows)
+(30 rows)
 
 SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3;
  a  | sum  | count 
@@ -1351,14 +1427,17 @@ SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) <
                      ->  Parallel Append
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_para.x
+                                 Buckets: 16
                                  ->  Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_para_1.x
+                                 Buckets: 16
                                  ->  Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_para_2.x
+                                 Buckets: 16
                                  ->  Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2
-(19 rows)
+(22 rows)
 
 SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3;
  x  | sum  |        avg         | count 
@@ -1388,14 +1467,17 @@ SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) <
                      ->  Parallel Append
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_para.y
+                                 Buckets: 32
                                  ->  Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_para_1.y
+                                 Buckets: 32
                                  ->  Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1
                            ->  Partial HashAggregate
                                  Group Key: pagg_tab_para_2.y
+                                 Buckets: 32
                                  ->  Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2
-(19 rows)
+(22 rows)
 
 SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3;
  y  |  sum  |         avg         | count 
@@ -1425,11 +1507,12 @@ SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) <
                      Sort Key: pagg_tab_para.x
                      ->  Partial HashAggregate
                            Group Key: pagg_tab_para.x
+                           Buckets: 64
                            ->  Parallel Append
                                  ->  Seq Scan on pagg_tab_para_p1 pagg_tab_para_1
                                  ->  Seq Scan on pagg_tab_para_p3 pagg_tab_para_3
                                  ->  Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_2
-(15 rows)
+(16 rows)
 
 SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3;
  x  | sum  |        avg         | count 
@@ -1459,11 +1542,12 @@ SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) <
                      Sort Key: pagg_tab_para.x
                      ->  Partial HashAggregate
                            Group Key: pagg_tab_para.x
+                           Buckets: 64
                            ->  Parallel Append
                                  ->  Seq Scan on pagg_tab_para_p1 pagg_tab_para_1
                                  ->  Seq Scan on pagg_tab_para_p2 pagg_tab_para_2
                                  ->  Seq Scan on pagg_tab_para_p3 pagg_tab_para_3
-(15 rows)
+(16 rows)
 
 SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3;
  x  | sum  |        avg         | count 
@@ -1488,17 +1572,20 @@ SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) <
    ->  Append
          ->  HashAggregate
                Group Key: pagg_tab_para.x
+               Buckets: 16
                Filter: (avg(pagg_tab_para.y) < '7'::numeric)
                ->  Seq Scan on pagg_tab_para_p1 pagg_tab_para
          ->  HashAggregate
                Group Key: pagg_tab_para_1.x
+               Buckets: 16
                Filter: (avg(pagg_tab_para_1.y) < '7'::numeric)
                ->  Seq Scan on pagg_tab_para_p2 pagg_tab_para_1
          ->  HashAggregate
                Group Key: pagg_tab_para_2.x
+               Buckets: 16
                Filter: (avg(pagg_tab_para_2.y) < '7'::numeric)
                ->  Seq Scan on pagg_tab_para_p3 pagg_tab_para_2
-(15 rows)
+(18 rows)
 
 SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3;
  x  | sum  |        avg         | count 
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index b3fbe47bde..b8309304c0 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -819,6 +819,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHER
                Join Filter: (t1_2.a = t1_5.b)
                ->  HashAggregate
                      Group Key: t1_5.b
+                     Buckets: 4
                      ->  Hash Join
                            Hash Cond: (((t2_1.a + t2_1.b) / 2) = t1_5.b)
                            ->  Seq Scan on prt1_e_p1 t2_1
@@ -832,6 +833,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHER
                Join Filter: (t1_3.a = t1_6.b)
                ->  HashAggregate
                      Group Key: t1_6.b
+                     Buckets: 4
                      ->  Hash Join
                            Hash Cond: (((t2_2.a + t2_2.b) / 2) = t1_6.b)
                            ->  Seq Scan on prt1_e_p2 t2_2
@@ -845,6 +847,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHER
                Join Filter: (t1_4.a = t1_7.b)
                ->  HashAggregate
                      Group Key: t1_7.b
+                     Buckets: 2
                      ->  Nested Loop
                            ->  Seq Scan on prt2_p3 t1_7
                                  Filter: (a = 0)
@@ -853,7 +856,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHER
                ->  Index Scan using iprt1_p3_a on prt1_p3 t1_4
                      Index Cond: (a = ((t2_3.a + t2_3.b) / 2))
                      Filter: (b = 0)
-(41 rows)
+(44 rows)
 
 SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a;
   a  | b |  c   
@@ -874,6 +877,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (
          ->  Nested Loop
                ->  HashAggregate
                      Group Key: t1_6.b
+                     Buckets: 4
                      ->  Hash Semi Join
                            Hash Cond: (t1_6.b = ((t1_9.a + t1_9.b) / 2))
                            ->  Seq Scan on prt2_p1 t1_6
@@ -886,6 +890,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (
          ->  Nested Loop
                ->  HashAggregate
                      Group Key: t1_7.b
+                     Buckets: 4
                      ->  Hash Semi Join
                            Hash Cond: (t1_7.b = ((t1_10.a + t1_10.b) / 2))
                            ->  Seq Scan on prt2_p2 t1_7
@@ -898,6 +903,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (
          ->  Nested Loop
                ->  HashAggregate
                      Group Key: t1_8.b
+                     Buckets: 2
                      ->  Hash Semi Join
                            Hash Cond: (t1_8.b = ((t1_11.a + t1_11.b) / 2))
                            ->  Seq Scan on prt2_p3 t1_8
@@ -907,7 +913,7 @@ SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (
                ->  Index Scan using iprt1_p3_a on prt1_p3 t1_5
                      Index Cond: (a = t1_8.b)
                      Filter: (b = 0)
-(39 rows)
+(42 rows)
 
 SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a;
   a  | b |  c   
@@ -1466,6 +1472,7 @@ SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c
    Sort Key: t1.c
    ->  HashAggregate
          Group Key: t1.c, t2.c
+         Buckets: 4
          ->  Append
                ->  Hash Join
                      Hash Cond: (t2_1.c = t1_1.c)
@@ -1485,7 +1492,7 @@ SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c
                      ->  Hash
                            ->  Seq Scan on plt1_p3 t1_3
                                  Filter: ((a % 25) = 0)
-(23 rows)
+(24 rows)
 
 --
 -- multiple levels of partitioning
diff --git a/src/test/regress/expected/pg_lsn.out b/src/test/regress/expected/pg_lsn.out
index 64d41dfdad..c6a56254be 100644
--- a/src/test/regress/expected/pg_lsn.out
+++ b/src/test/regress/expected/pg_lsn.out
@@ -85,6 +85,7 @@ SELECT DISTINCT (i || '/' || j)::pg_lsn f
    Sort Key: (((((i.i)::text || '/'::text) || (j.j)::text))::pg_lsn)
    ->  HashAggregate
          Group Key: ((((i.i)::text || '/'::text) || (j.j)::text))::pg_lsn
+         Buckets: 4
          ->  Nested Loop
                ->  Function Scan on generate_series k
                ->  Materialize
@@ -93,7 +94,7 @@ SELECT DISTINCT (i || '/' || j)::pg_lsn f
                                  Filter: ((j > 0) AND (j <= 10))
                            ->  Function Scan on generate_series i
                                  Filter: (i <= 10)
-(12 rows)
+(13 rows)
 
 SELECT DISTINCT (i || '/' || j)::pg_lsn f
   FROM generate_series(1, 10) i,
diff --git a/src/test/regress/expected/select_distinct.out b/src/test/regress/expected/select_distinct.out
index f3696c6d1d..8e2903a3ed 100644
--- a/src/test/regress/expected/select_distinct.out
+++ b/src/test/regress/expected/select_distinct.out
@@ -137,9 +137,10 @@ SELECT count(*) FROM
    ->  HashAggregate
          Output: tenk1.two, tenk1.four, tenk1.two
          Group Key: tenk1.two, tenk1.four, tenk1.two
+         Buckets: 8
          ->  Seq Scan on public.tenk1
                Output: tenk1.two, tenk1.four, tenk1.two
-(7 rows)
+(8 rows)
 
 SELECT count(*) FROM
   (SELECT DISTINCT two, four, two FROM tenk1) ss;
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 96dfb7c8dd..2b8a253c79 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -226,12 +226,14 @@ explain (costs off)
 ---------------------------------------------------
  Finalize HashAggregate
    Group Key: (length((stringu1)::text))
+   Buckets: 1024
    ->  Gather
          Workers Planned: 4
          ->  Partial HashAggregate
                Group Key: length((stringu1)::text)
+               Buckets: 1024
                ->  Parallel Seq Scan on tenk1
-(7 rows)
+(9 rows)
 
 select length(stringu1) from tenk1 group by length(stringu1);
  length 
@@ -247,12 +249,14 @@ explain (costs off)
    Sort Key: stringu1
    ->  Finalize HashAggregate
          Group Key: stringu1
+         Buckets: 1024
          ->  Gather
                Workers Planned: 4
                ->  Partial HashAggregate
                      Group Key: stringu1
+                     Buckets: 1024
                      ->  Parallel Seq Scan on tenk1
-(9 rows)
+(11 rows)
 
 -- test that parallel plan for aggregates is not selected when
 -- target list contains parallel restricted clause.
@@ -263,10 +267,11 @@ explain (costs off)
 -------------------------------------------------------------------
  HashAggregate
    Group Key: sp_parallel_restricted(unique1)
+   Buckets: 16384
    ->  Gather
          Workers Planned: 4
          ->  Parallel Index Only Scan using tenk1_unique1 on tenk1
-(5 rows)
+(6 rows)
 
 -- test prepared statement
 prepare tenk1_count(integer) As select  count((unique1)) from tenk1 where hundred > $1;
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index 4c6cd5f146..e4a79170d8 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -932,6 +932,7 @@ where o.ten = 1;
                Filter: (ten = 1)
          ->  Subquery Scan on ss
                ->  HashSetOp Except
+                     Buckets: 2
                      ->  Append
                            ->  Subquery Scan on "*SELECT* 1"
                                  ->  Index Scan using onek_unique1 on onek i1
@@ -939,7 +940,7 @@ where o.ten = 1;
                            ->  Subquery Scan on "*SELECT* 2"
                                  ->  Index Scan using onek_unique1 on onek i2
                                        Index Cond: (unique1 = o.unique2)
-(13 rows)
+(14 rows)
 
 select count(*) from
   onek o cross join lateral (
@@ -976,10 +977,11 @@ where o.ten = 1;
          ->  CTE Scan on x
                CTE x
                  ->  Recursive Union
+                       Buckets: 64
                        ->  Result
                        ->  WorkTable Scan on x x_1
                              Filter: (a < 10)
-(10 rows)
+(11 rows)
 
 select sum(o.four), sum(ss.a) from
   onek o cross join lateral (
@@ -1130,9 +1132,10 @@ select * from int4_tbl o where (f1, f1) in
                            ->  HashAggregate
                                  Output: i.f1
                                  Group Key: i.f1
+                                 Buckets: 8
                                  ->  Seq Scan on public.int4_tbl i
                                        Output: i.f1
-(19 rows)
+(20 rows)
 
 select * from int4_tbl o where (f1, f1) in
   (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1);
diff --git a/src/test/regress/expected/tablesample.out b/src/test/regress/expected/tablesample.out
index 078358d226..fc41a81039 100644
--- a/src/test/regress/expected/tablesample.out
+++ b/src/test/regress/expected/tablesample.out
@@ -255,11 +255,12 @@ select pct, count(unique1) from
 --------------------------------------------------------
  HashAggregate
    Group Key: "*VALUES*".column1
+   Buckets: 2
    ->  Nested Loop
          ->  Values Scan on "*VALUES*"
          ->  Sample Scan on tenk1
                Sampling: bernoulli ("*VALUES*".column1)
-(6 rows)
+(7 rows)
 
 select pct, count(unique1) from
   (values (0),(100)) v(pct),
diff --git a/src/test/regress/expected/union.out b/src/test/regress/expected/union.out
index 6e72e92d80..2153974fa7 100644
--- a/src/test/regress/expected/union.out
+++ b/src/test/regress/expected/union.out
@@ -355,12 +355,13 @@ select count(*) from
  Aggregate
    ->  Subquery Scan on ss
          ->  HashSetOp Intersect
+               Buckets: 8192
                ->  Append
                      ->  Subquery Scan on "*SELECT* 2"
                            ->  Seq Scan on tenk1
                      ->  Subquery Scan on "*SELECT* 1"
                            ->  Index Only Scan using tenk1_unique1 on tenk1 tenk1_1
-(8 rows)
+(9 rows)
 
 select count(*) from
   ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss;
@@ -374,13 +375,14 @@ select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
                                QUERY PLAN                               
 ------------------------------------------------------------------------
  HashSetOp Except
+   Buckets: 16384
    ->  Append
          ->  Subquery Scan on "*SELECT* 1"
                ->  Index Only Scan using tenk1_unique1 on tenk1
          ->  Subquery Scan on "*SELECT* 2"
                ->  Index Only Scan using tenk1_unique2 on tenk1 tenk1_1
                      Filter: (unique2 <> 10)
-(7 rows)
+(8 rows)
 
 select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10;
  unique1 
@@ -585,12 +587,13 @@ select from generate_series(1,5) intersect select from generate_series(1,3);
                               QUERY PLAN                              
 ----------------------------------------------------------------------
  HashSetOp Intersect
+   Buckets: 2
    ->  Append
          ->  Subquery Scan on "*SELECT* 1"
                ->  Function Scan on generate_series
          ->  Subquery Scan on "*SELECT* 2"
                ->  Function Scan on generate_series generate_series_1
-(6 rows)
+(7 rows)
 
 select from generate_series(1,5) union select from generate_series(1,3);
 --
@@ -726,12 +729,13 @@ explain (costs off)
 ---------------------------------------------------
  HashAggregate
    Group Key: ((t1.a || t1.b))
+   Buckets: 8
    ->  Append
          ->  Index Scan using t1_ab_idx on t1
                Index Cond: ((a || b) = 'ab'::text)
          ->  Index Only Scan using t2_pkey on t2
                Index Cond: (ab = 'ab'::text)
-(7 rows)
+(8 rows)
 
 --
 -- Test that ORDER BY for UNION ALL can be pushed down to inheritance
@@ -864,11 +868,12 @@ ORDER BY x;
          Filter: (ss.x < 4)
          ->  HashAggregate
                Group Key: (1), (generate_series(1, 10))
+               Buckets: 16
                ->  Append
                      ->  ProjectSet
                            ->  Result
                      ->  Result
-(10 rows)
+(11 rows)
 
 SELECT * FROM
   (SELECT 1 AS t, generate_series(1,10) AS x
diff --git a/src/test/regress/expected/window.out b/src/test/regress/expected/window.out
index d5fd4045f9..ea34daffe4 100644
--- a/src/test/regress/expected/window.out
+++ b/src/test/regress/expected/window.out
@@ -624,8 +624,9 @@ select first_value(max(x)) over (), y
  WindowAgg
    ->  HashAggregate
          Group Key: (tenk1.ten + tenk1.four)
+         Buckets: 64
          ->  Seq Scan on tenk1
-(4 rows)
+(5 rows)
 
 -- test non-default frame specifications
 SELECT four, ten,
diff --git a/src/test/regress/expected/write_parallel.out b/src/test/regress/expected/write_parallel.out
index 0c4da2591a..25193a8b84 100644
--- a/src/test/regress/expected/write_parallel.out
+++ b/src/test/regress/expected/write_parallel.out
@@ -19,12 +19,14 @@ explain (costs off) create table parallel_write as
 ---------------------------------------------------
  Finalize HashAggregate
    Group Key: (length((stringu1)::text))
+   Buckets: 1024
    ->  Gather
          Workers Planned: 4
          ->  Partial HashAggregate
                Group Key: length((stringu1)::text)
+               Buckets: 1024
                ->  Parallel Seq Scan on tenk1
-(7 rows)
+(9 rows)
 
 create table parallel_write as
     select length(stringu1) from tenk1 group by length(stringu1);
@@ -35,12 +37,14 @@ explain (costs off) select length(stringu1) into parallel_write
 ---------------------------------------------------
  Finalize HashAggregate
    Group Key: (length((stringu1)::text))
+   Buckets: 1024
    ->  Gather
          Workers Planned: 4
          ->  Partial HashAggregate
                Group Key: length((stringu1)::text)
+               Buckets: 1024
                ->  Parallel Seq Scan on tenk1
-(7 rows)
+(9 rows)
 
 select length(stringu1) into parallel_write
     from tenk1 group by length(stringu1);
@@ -51,12 +55,14 @@ explain (costs off) create materialized view parallel_mat_view as
 ---------------------------------------------------
  Finalize HashAggregate
    Group Key: (length((stringu1)::text))
+   Buckets: 1024
    ->  Gather
          Workers Planned: 4
          ->  Partial HashAggregate
                Group Key: length((stringu1)::text)
+               Buckets: 1024
                ->  Parallel Seq Scan on tenk1
-(7 rows)
+(9 rows)
 
 create materialized view parallel_mat_view as
     select length(stringu1) from tenk1 group by length(stringu1);
@@ -67,12 +73,14 @@ explain (costs off) create table parallel_write as execute prep_stmt;
 ---------------------------------------------------
  Finalize HashAggregate
    Group Key: (length((stringu1)::text))
+   Buckets: 1024
    ->  Gather
          Workers Planned: 4
          ->  Partial HashAggregate
                Group Key: length((stringu1)::text)
+               Buckets: 1024
                ->  Parallel Seq Scan on tenk1
-(7 rows)
+(9 rows)
 
 create table parallel_write as execute prep_stmt;
 drop table parallel_write;
-- 
2.17.0

>From 41f9ea8d4a06f895872daf088603834550ce6dcc Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 23 Feb 2020 23:13:07 -0600
Subject: [PATCH v6 2/7] refactor show_grouping_set_keys

---
 src/backend/commands/explain.c | 55 ++++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 22 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index ff22181510..f416f60676 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -95,6 +95,8 @@ static void show_grouping_set_info(AggState *aggstate,
 								   List *ancestors,
 								   HashTableInstrumentation *inst,
 								   ExplainState *es);
+static void show_grouping_set_keys(AggState *aggstate, Agg *aggnode,
+				   List *context, bool useprefix, ExplainState *es);
 static void show_group_keys(GroupState *gstate, List *ancestors,
 							ExplainState *es);
 static void show_sort_group_keys(PlanState *planstate, const char *qlabel,
@@ -2348,6 +2350,37 @@ show_grouping_set_info(AggState *aggstate,
 					   List *context, bool useprefix,
 					   List *ancestors, HashTableInstrumentation *inst,
 					   ExplainState *es)
+{
+	PlanState	*planstate = outerPlanState(aggstate);
+
+	ExplainOpenGroup("Grouping Set", NULL, true, es);
+
+	if (sortnode)
+	{
+		show_sort_group_keys(planstate, "Sort Key",
+							 sortnode->numCols, sortnode->sortColIdx,
+							 sortnode->sortOperators, sortnode->collations,
+							 sortnode->nullsFirst,
+							 ancestors, es);
+		if (es->format == EXPLAIN_FORMAT_TEXT)
+			es->indent++;
+	}
+
+	show_grouping_set_keys(aggstate, aggnode, context, useprefix, es);
+
+	if (aggnode->aggstrategy == AGG_HASHED ||
+			aggnode->aggstrategy == AGG_MIXED)
+		show_tuplehash_info(inst, es);
+
+	if (sortnode && es->format == EXPLAIN_FORMAT_TEXT)
+		es->indent--;
+
+	ExplainCloseGroup("Grouping Set", NULL, true, es);
+}
+
+/* Show keys of a grouping set */
+static void
+show_grouping_set_keys(AggState *aggstate, Agg *aggnode, List *context, bool useprefix, ExplainState *es)
 {
 	PlanState	*planstate = outerPlanState(aggstate);
 	Plan	   *plan = planstate->plan;
@@ -2369,19 +2402,6 @@ show_grouping_set_info(AggState *aggstate,
 		keysetname = "Group Keys";
 	}
 
-	ExplainOpenGroup("Grouping Set", NULL, true, es);
-
-	if (sortnode)
-	{
-		show_sort_group_keys(planstate, "Sort Key",
-							 sortnode->numCols, sortnode->sortColIdx,
-							 sortnode->sortOperators, sortnode->collations,
-							 sortnode->nullsFirst,
-							 ancestors, es);
-		if (es->format == EXPLAIN_FORMAT_TEXT)
-			es->indent++;
-	}
-
 	ExplainOpenGroup(keysetname, keysetname, false, es);
 
 	foreach(lc, gsets)
@@ -2412,15 +2432,6 @@ show_grouping_set_info(AggState *aggstate,
 	}
 
 	ExplainCloseGroup(keysetname, keysetname, false, es);
-
-	if (aggnode->aggstrategy == AGG_HASHED ||
-			aggnode->aggstrategy == AGG_MIXED)
-		show_tuplehash_info(inst, es);
-
-	if (sortnode && es->format == EXPLAIN_FORMAT_TEXT)
-		es->indent--;
-
-	ExplainCloseGroup("Grouping Set", NULL, true, es);
 }
 
 /*
-- 
2.17.0

>From 2e6e2007a9c1b4c4584a1fb6fee21443f34b6cc7 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sat, 15 Feb 2020 14:13:06 -0600
Subject: [PATCH v6 3/7] Gross hack to put hash stats of subplans in the
 right(?) place

---
 src/backend/commands/explain.c | 70 ++++++++++++++++++----------------
 1 file changed, 38 insertions(+), 32 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index f416f60676..5791ee0cb9 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -66,7 +66,7 @@ static double elapsed_time(instr_time *starttime);
 static bool ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used);
 static void ExplainNode(PlanState *planstate, List *ancestors,
 						const char *relationship, const char *plan_name,
-						ExplainState *es);
+						SubPlanState *subplanstate, ExplainState *es);
 static void show_plan_tlist(PlanState *planstate, List *ancestors,
 							ExplainState *es);
 static void show_expression(Node *node, const char *qlabel,
@@ -721,7 +721,7 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
 		ps = outerPlanState(ps);
 		es->hide_workers = true;
 	}
-	ExplainNode(ps, NIL, NULL, NULL, es);
+	ExplainNode(ps, NIL, NULL, NULL, NULL, es);
 
 	/*
 	 * If requested, include information about GUC parameters with values that
@@ -1083,7 +1083,7 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
 static void
 ExplainNode(PlanState *planstate, List *ancestors,
 			const char *relationship, const char *plan_name,
-			ExplainState *es)
+			SubPlanState *subplanstate, ExplainState *es)
 {
 	Plan	   *plan = planstate->plan;
 	const char *pname;			/* node type name for text output */
@@ -1340,6 +1340,21 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			ExplainIndentText(es);
 			appendStringInfo(es->str, "%s\n", plan_name);
 			es->indent++;
+
+			Assert(subplanstate != NULL);
+			/* Show hash stats for hashed subplan */
+			if (subplanstate->hashtable)
+			{
+				ExplainIndentText(es);
+				appendStringInfoString(es->str, "Hashtable: ");
+				show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+			}
+			if (subplanstate->hashnulls)
+			{
+				ExplainIndentText(es);
+				appendStringInfoString(es->str, "Null Hashtable: ");
+				show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+			}
 		}
 		if (es->indent)
 		{
@@ -1368,6 +1383,20 @@ ExplainNode(PlanState *planstate, List *ancestors,
 		if (custom_name)
 			ExplainPropertyText("Custom Plan Provider", custom_name, es);
 		ExplainPropertyBool("Parallel Aware", plan->parallel_aware, es);
+
+		if (subplanstate && subplanstate->hashtable)
+		{
+			ExplainOpenGroup("Hashtable", "Hashtable", true, es);
+			show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+			ExplainCloseGroup("Hashtable", "Hashtable", true, es);
+		}
+
+		if (subplanstate && subplanstate->hashnulls)
+		{
+			ExplainOpenGroup("Null Hashtable", "Null Hashtable", true, es);
+			show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+			ExplainCloseGroup("Null Hashtable", "Null Hashtable", true, es);
+		}
 	}
 
 	switch (nodeTag(plan))
@@ -2040,12 +2069,12 @@ ExplainNode(PlanState *planstate, List *ancestors,
 	/* lefttree */
 	if (outerPlanState(planstate))
 		ExplainNode(outerPlanState(planstate), ancestors,
-					"Outer", NULL, es);
+					"Outer", NULL, NULL, es);
 
 	/* righttree */
 	if (innerPlanState(planstate))
 		ExplainNode(innerPlanState(planstate), ancestors,
-					"Inner", NULL, es);
+					"Inner", NULL, NULL, es);
 
 	/* special child plans */
 	switch (nodeTag(plan))
@@ -2077,7 +2106,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			break;
 		case T_SubqueryScan:
 			ExplainNode(((SubqueryScanState *) planstate)->subplan, ancestors,
-						"Subquery", NULL, es);
+						"Subquery", NULL, NULL, es);
 			break;
 		case T_CustomScan:
 			ExplainCustomChildren((CustomScanState *) planstate,
@@ -3480,7 +3509,7 @@ ExplainMemberNodes(PlanState **planstates, int nplans,
 
 	for (j = 0; j < nplans; j++)
 		ExplainNode(planstates[j], ancestors,
-					"Member", NULL, es);
+					"Member", NULL, NULL, es);
 }
 
 /*
@@ -3538,30 +3567,7 @@ ExplainSubPlans(List *plans, List *ancestors,
 		ancestors = lcons(sp, ancestors);
 
 		ExplainNode(sps->planstate, ancestors,
-					relationship, sp->plan_name, es);
-		if (sps->hashtable)
-		{
-			ExplainOpenGroup("Hashtable", "Hashtable", true, es);
-			if (es->format == EXPLAIN_FORMAT_TEXT)
-			{
-				ExplainIndentText(es);
-				appendStringInfoString(es->str, "Hashtable: ");
-			}
-			show_tuplehash_info(&sps->hashtable->instrument, es);
-			ExplainCloseGroup("Hashtable", "Hashtable", true, es);
-		}
-
-		if (sps->hashnulls)
-		{
-			ExplainOpenGroup("Null Hashtable", "Null Hashtable", true, es);
-			if (es->format == EXPLAIN_FORMAT_TEXT)
-			{
-				ExplainIndentText(es);
-				appendStringInfoString(es->str, "Null Hashtable: ");
-			}
-			show_tuplehash_info(&sps->hashnulls->instrument, es);
-			ExplainCloseGroup("Null Hashtable", "Null Hashtable", true, es);
-		}
+					relationship, sp->plan_name, sps, es);
 
 		ancestors = list_delete_first(ancestors);
 	}
@@ -3578,7 +3584,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es)
 	(list_length(css->custom_ps) != 1 ? "children" : "child");
 
 	foreach(cell, css->custom_ps)
-		ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es);
+		ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, NULL, es);
 }
 
 /*
-- 
2.17.0

>From 80e69b60eacd1b7e3dab2533b81ed1e5502a1d02 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Wed, 12 Feb 2020 23:40:45 -0600
Subject: [PATCH v6 4/7] implement hash stats for bitmapHeapScan..

TIDBitmap is a private structure, so add an accessor function to return its
instrumentation, and duplicate the instrumentation struct in BitmapHeapState.

The instrumentation itself could be implemented in simplehash.h.  But I think
the higher-layer BitmapHeapScan would have to include an instrumentation struct
anyway, since explain.c cannot look into tbm->pagetable to get .instrument (and
the pagetable structure itself doesn't match tuplehash).

Also, if instrumentation were implemented in simplehash.h, I think every
insertion or deletion would need to check ->members and ->size; that isn't
necessary for Agg, but it is in the general case, and specifically for
tidbitmap, since it actually DELETEs hashtable entries.  Otherwise, simplehash
would need a new function like UpdateTupleHashStats, which the higher-level
nodes would need to call after filling the hashtable or before deleting
tuples, which seems to defeat the purpose of implementing stats at a lower
layer.

Note that this doesn't affect any regression tests, since the hashtable isn't
allocated during plain "explain"; "explain analyze" would show memory stats,
which we'd have to filter.
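
For illustration only, a minimal sketch of what such per-operation tracking
inside simplehash.h might look like; the "instr" and "members_peak" fields
are hypothetical and not part of this patch:

	/* hypothetical sketch: would have to run on every insert and delete */
	static inline void
	SH_UPDATE_STATS(SH_TYPE *tb)
	{
		tb->instr.nbuckets = tb->size;
		tb->instr.space_peak_hash = Max(tb->instr.space_peak_hash,
						tb->size * sizeof(SH_ELEMENT_TYPE));
		tb->instr.members_peak = Max(tb->instr.members_peak, tb->members);
	}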
---
 src/backend/commands/explain.c            |  5 ++--
 src/backend/executor/nodeBitmapHeapscan.c |  3 +++
 src/backend/nodes/tidbitmap.c             | 29 +++++++++++++++++++++++
 src/include/nodes/execnodes.h             |  1 +
 src/include/nodes/tidbitmap.h             |  4 ++++
 5 files changed, 40 insertions(+), 2 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 5791ee0cb9..637480de79 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -1734,8 +1734,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			if (plan->qual)
 				show_instrumentation_count("Rows Removed by Filter", 1,
 										   planstate, es);
-			if (es->analyze)
-				show_tidbitmap_info((BitmapHeapScanState *) planstate, es);
+			show_tidbitmap_info((BitmapHeapScanState *) planstate, es);
 			break;
 		case T_SampleScan:
 			show_tablesample(((SampleScan *) plan)->tablesample,
@@ -2927,6 +2926,8 @@ show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es)
 			appendStringInfoChar(es->str, '\n');
 		}
 	}
+
+	show_tuplehash_info(&planstate->instrument, es);
 }
 
 /*
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index ae8a11da30..9ae99a326e 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -182,6 +182,8 @@ BitmapHeapNext(BitmapHeapScanState *node)
 #endif							/* USE_PREFETCH */
 		}
 		node->initialized = true;
+		if (node->tbm)
+			node->instrument = *tbm_instrumentation(node->tbm);
 	}
 
 	for (;;)
@@ -744,6 +746,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	scanstate->shared_tbmiterator = NULL;
 	scanstate->shared_prefetch_iterator = NULL;
 	scanstate->pstate = NULL;
+	memset(&scanstate->instrument, 0, sizeof(scanstate->instrument));
 
 	/*
 	 * We can potentially skip fetching heap pages if we do not need any
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index ad4e071ca3..ab81cce3b2 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -44,6 +44,7 @@
 #include "common/hashfn.h"
 #include "nodes/bitmapset.h"
 #include "nodes/tidbitmap.h"
+#include "nodes/execnodes.h"
 #include "storage/lwlock.h"
 #include "utils/dsa.h"
 
@@ -166,6 +167,7 @@ struct TIDBitmap
 	dsa_pointer ptpages;		/* dsa_pointer to the page array */
 	dsa_pointer ptchunks;		/* dsa_pointer to the chunk array */
 	dsa_area   *dsa;			/* reference to per-query dsa area */
+	HashTableInstrumentation instrument;	/* Returned by accessor function */
 };
 
 /*
@@ -294,6 +296,7 @@ tbm_create_pagetable(TIDBitmap *tbm)
 	Assert(tbm->pagetable == NULL);
 
 	tbm->pagetable = pagetable_create(tbm->mcxt, 128, tbm);
+	tbm->instrument.nbuckets_original = tbm->pagetable->size;
 
 	/* If entry1 is valid, push it into the hashtable */
 	if (tbm->status == TBM_ONE_PAGE)
@@ -1147,6 +1150,32 @@ tbm_end_iterate(TBMIterator *iterator)
 	pfree(iterator);
 }
 
+/*
+ * tbm_instrumentation - update stored stats and return pointer to
+ * instrumentation structure
+ *
+ * Stats are recomputed each time this is called.
+ * The returned data is within the given tbm, and is destroyed with it.
+ */
+HashTableInstrumentation *
+tbm_instrumentation(TIDBitmap *tbm)
+{
+	if (tbm->pagetable)
+	{
+		tbm->instrument.nbuckets = tbm->pagetable->size;
+		tbm->instrument.space_peak_hash = sizeof(PagetableEntry) * tbm->pagetable->size;
+
+		/*
+		 * If there are lossy pages, then at some point we filled maxentries;
+		 * otherwise, the number of pages is "->members".
+		 */
+		tbm->instrument.space_peak_tuples = sizeof(BlockNumber) *
+			(tbm->nchunks>0 ? tbm->maxentries : tbm->pagetable->members);
+	}
+
+	return &tbm->instrument;
+}
+
 /*
  * tbm_end_shared_iterate - finish a shared iteration over a TIDBitmap
  *
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index cfeada5f1d..b1e2d1f1ae 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1609,6 +1609,7 @@ typedef struct BitmapHeapScanState
 	TBMSharedIterator *shared_tbmiterator;
 	TBMSharedIterator *shared_prefetch_iterator;
 	ParallelBitmapHeapState *pstate;
+	HashTableInstrumentation	instrument;
 } BitmapHeapScanState;
 
 /* ----------------
diff --git a/src/include/nodes/tidbitmap.h b/src/include/nodes/tidbitmap.h
index d562fcae34..de0cdfb91f 100644
--- a/src/include/nodes/tidbitmap.h
+++ b/src/include/nodes/tidbitmap.h
@@ -26,6 +26,9 @@
 #include "utils/dsa.h"
 
 
+/* Forward decl */
+typedef struct HashTableInstrumentation HashTableInstrumentation;
+
 /*
  * Actual bitmap representation is private to tidbitmap.c.  Callers can
  * do IsA(x, TIDBitmap) on it, but nothing else.
@@ -71,5 +74,6 @@ extern void tbm_end_shared_iterate(TBMSharedIterator *iterator);
 extern TBMSharedIterator *tbm_attach_shared_iterate(dsa_area *dsa,
 													dsa_pointer dp);
 extern long tbm_calculate_entries(double maxbytes);
+extern HashTableInstrumentation *tbm_instrumentation(TIDBitmap *tbm);
 
 #endif							/* TIDBITMAP_H */
-- 
2.17.0

>From 320bddf8fbefe31c150406187fcf8757e8a3ebba Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 9 Feb 2020 15:08:14 -0600
Subject: [PATCH v6 5/7] Refactor for consistency/symmetry

This moves hash instrumentation out of execGrouping.c / TupleHashTable and into
the higher-level nodes, for consistency with bitmapHeapScan.

This might be unimportant, and it may be clearer to leave it in execGrouping.c.
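
For reference, a sketch of the Init/Update helpers used below, reconstructed
from the removed UpdateTupleHashTableStats(); the patch's actual definitions
live in the execnodes.h hunk and may differ in detail:

	/* sketch only; see the execnodes.h hunk for the real definitions */
	#define InitTupleHashTableStats(instr, htab, addsize) \
		do { \
			(instr).entrysize = sizeof(TupleHashEntryData) + (addsize); \
			(instr).nbuckets = (htab)->size; \
			(instr).nbuckets_original = (htab)->size; \
			(instr).space_peak_hash = (htab)->size * sizeof(TupleHashEntryData); \
			(instr).space_peak_tuples = 0; \
		} while (0)

	#define UpdateTupleHashTableStats(instr, htab) \
		do { \
			(instr).nbuckets = (htab)->size; \
			(instr).space_peak_hash = Max((instr).space_peak_hash, \
					(htab)->size * sizeof(TupleHashEntryData)); \
			(instr).space_peak_tuples = Max((instr).space_peak_tuples, \
					(htab)->members * (instr).entrysize); \
		} while (0)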
---
 src/backend/commands/explain.c            | 18 +++++++--------
 src/backend/executor/execGrouping.c       | 27 -----------------------
 src/backend/executor/nodeAgg.c            | 14 ++++++++----
 src/backend/executor/nodeRecursiveunion.c |  3 ++-
 src/backend/executor/nodeSetOp.c          |  6 ++++-
 src/backend/executor/nodeSubplan.c        | 10 +++++++--
 src/include/executor/executor.h           |  1 -
 src/include/executor/nodeAgg.h            |  1 +
 src/include/nodes/execnodes.h             | 24 ++++++++++++++++++--
 9 files changed, 57 insertions(+), 47 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 637480de79..eb093029d9 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -1347,13 +1347,13 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			{
 				ExplainIndentText(es);
 				appendStringInfoString(es->str, "Hashtable: ");
-				show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+				show_tuplehash_info(&subplanstate->instrument, es);
 			}
 			if (subplanstate->hashnulls)
 			{
 				ExplainIndentText(es);
 				appendStringInfoString(es->str, "Null Hashtable: ");
-				show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+				show_tuplehash_info(&subplanstate->instrument_nulls, es);
 			}
 		}
 		if (es->indent)
@@ -1387,14 +1387,14 @@ ExplainNode(PlanState *planstate, List *ancestors,
 		if (subplanstate && subplanstate->hashtable)
 		{
 			ExplainOpenGroup("Hashtable", "Hashtable", true, es);
-			show_tuplehash_info(&subplanstate->hashtable->instrument, es);
+			show_tuplehash_info(&subplanstate->instrument, es);
 			ExplainCloseGroup("Hashtable", "Hashtable", true, es);
 		}
 
 		if (subplanstate && subplanstate->hashnulls)
 		{
 			ExplainOpenGroup("Null Hashtable", "Null Hashtable", true, es);
-			show_tuplehash_info(&subplanstate->hashnulls->instrument, es);
+			show_tuplehash_info(&subplanstate->instrument_nulls, es);
 			ExplainCloseGroup("Null Hashtable", "Null Hashtable", true, es);
 		}
 	}
@@ -1925,14 +1925,14 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			{
 				SetOpState *sos = castNode(SetOpState, planstate);
 				if (sos->hashtable)
-					show_tuplehash_info(&sos->hashtable->instrument, es);
+					show_tuplehash_info(&sos->instrument, es);
 			}
 			break;
 		case T_RecursiveUnion:
 			{
 				RecursiveUnionState *rus = (RecursiveUnionState *)planstate;
 				if (rus->hashtable)
-					show_tuplehash_info(&rus->hashtable->instrument, es);
+					show_tuplehash_info(&rus->instrument, es);
 				break;
 			}
 		case T_Group:
@@ -2320,7 +2320,7 @@ show_agg_keys(AggState *astate, List *ancestors,
 								 ancestors, es);
 			Assert(astate->num_hashes <= 1);
 			if (astate->num_hashes)
-				show_tuplehash_info(&astate->perhash[0].hashtable->instrument, es);
+				show_tuplehash_info(&astate->perhash[0].instrument, es);
 		}
 
 		ancestors = list_delete_first(ancestors);
@@ -2347,7 +2347,7 @@ show_grouping_sets(AggState *aggstate, Agg *agg,
 
 	show_grouping_set_info(aggstate, agg, NULL, context, useprefix, ancestors,
 			aggstate->num_hashes ?
-			&aggstate->perhash[setno++].hashtable->instrument : NULL,
+			&aggstate->perhash[setno++].instrument : NULL,
 			es);
 
 	foreach(lc, agg->chain)
@@ -2360,7 +2360,7 @@ show_grouping_sets(AggState *aggstate, Agg *agg,
 				aggnode->aggstrategy == AGG_MIXED)
 		{
 			Assert(setno < aggstate->num_hashes);
-			inst = &aggstate->perhash[setno++].hashtable->instrument;
+			inst = &aggstate->perhash[setno++].instrument;
 		}
 
 		show_grouping_set_info(aggstate, aggnode, sortnode,
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 2f5258e1d2..009d27b9a8 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -188,7 +188,6 @@ BuildTupleHashTableExt(PlanState *parent,
 	hashtable->inputslot = NULL;
 	hashtable->in_hash_funcs = NULL;
 	hashtable->cur_eq_func = NULL;
-	memset(&hashtable->instrument, 0, sizeof(hashtable->instrument));
 
 	/*
 	 * If parallelism is in use, even if the master backend is performing the
@@ -204,7 +203,6 @@ BuildTupleHashTableExt(PlanState *parent,
 		hashtable->hash_iv = 0;
 
 	hashtable->hashtab = tuplehash_create(metacxt, nbuckets, hashtable);
-	UpdateTupleHashTableStats(hashtable, true);
 
 	/*
 	 * We copy the input tuple descriptor just for safety --- we assume all
@@ -283,34 +281,9 @@ BuildTupleHashTable(PlanState *parent,
 void
 ResetTupleHashTable(TupleHashTable hashtable)
 {
-	UpdateTupleHashTableStats(hashtable, false);
 	tuplehash_reset(hashtable->hashtab);
 }
 
-/* Update instrumentation stats */
-void
-UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial)
-{
-	hashtable->instrument.nbuckets = hashtable->hashtab->size;
-	if (initial)
-	{
-		hashtable->instrument.nbuckets_original = hashtable->hashtab->size;
-		hashtable->instrument.space_peak_hash = hashtable->hashtab->size *
-			sizeof(TupleHashEntryData);
-		hashtable->instrument.space_peak_tuples = 0;
-	}
-	else
-	{
-#define maxself(a,b) a=Max(a,b)
-		/* hashtable->entrysize includes additionalsize */
-		maxself(hashtable->instrument.space_peak_hash,
-				hashtable->hashtab->size * sizeof(TupleHashEntryData));
-		maxself(hashtable->instrument.space_peak_tuples,
-				hashtable->hashtab->members * hashtable->entrysize);
-#undef maxself
-	}
-}
-
 /*
  * Find or create a hashtable entry for the tuple group containing the
  * given tuple.  The tuple must be the same type as the hashtable entries.
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 4b8525edb6..3d60f7a314 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1334,6 +1334,9 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
 		hashcxt,
 		tmpcxt,
 		DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+
+	InitTupleHashTableStats(perhash->instrument,
+			perhash->hashtable->hashtab, additionalsize);
 }
 
 /*
@@ -1710,9 +1713,10 @@ agg_retrieve_direct(AggState *aggstate)
 				 */
 				initialize_phase(aggstate, 0);
 				aggstate->table_filled = true;
-				UpdateTupleHashTableStats(aggstate->perhash[0].hashtable, false);
+				UpdateTupleHashTableStats(aggstate->perhash[0].instrument,
+						aggstate->perhash[0].hashtable->hashtab);
 				ResetTupleHashIterator(aggstate->perhash[0].hashtable,
-									   &aggstate->perhash[0].hashiter);
+						&aggstate->perhash[0].hashiter);
 				select_current_set(aggstate, 0, true);
 				return agg_retrieve_hash_table(aggstate);
 			}
@@ -1913,7 +1917,8 @@ agg_retrieve_direct(AggState *aggstate)
 						aggstate->current_phase == 1)
 				{
 					for (int i = 0; i < aggstate->num_hashes; i++)
-						UpdateTupleHashTableStats(aggstate->perhash[i].hashtable, false);
+						UpdateTupleHashTableStats(aggstate->perhash[i].instrument,
+								aggstate->perhash[i].hashtable->hashtab);
 				}
 			}
 
@@ -1990,7 +1995,8 @@ agg_fill_hash_table(AggState *aggstate)
 
 	aggstate->table_filled = true;
 	for (int i = 0; i < aggstate->num_hashes; i++)
-		UpdateTupleHashTableStats(aggstate->perhash[i].hashtable, false);
+		UpdateTupleHashTableStats(aggstate->perhash[i].instrument,
+				aggstate->perhash[i].hashtable->hashtab);
 
 	/* Initialize to walk the first hash table */
 	select_current_set(aggstate, 0, true);
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index 93272c28b1..594abdbdf0 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -50,6 +50,7 @@ build_hash_table(RecursiveUnionState *rustate)
 												rustate->tableContext,
 												rustate->tempContext,
 												false);
+	InitTupleHashTableStats(rustate->instrument, rustate->hashtable->hashtab, 0);
 }
 
 
@@ -157,7 +158,7 @@ ExecRecursiveUnion(PlanState *pstate)
 	}
 
 	if (node->hashtable)
-		UpdateTupleHashTableStats(node->hashtable, false);
+		UpdateTupleHashTableStats(node->instrument, node->hashtable->hashtab);
 
 	return NULL;
 }
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 9c0e0ab96e..a3860758eb 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -139,6 +139,9 @@ build_hash_table(SetOpState *setopstate)
 												   setopstate->tableContext,
 												   econtext->ecxt_per_tuple_memory,
 												   false);
+
+	InitTupleHashTableStats(setopstate->instrument,
+			setopstate->hashtable->hashtab, 0);
 }
 
 /*
@@ -415,7 +418,8 @@ setop_fill_hash_table(SetOpState *setopstate)
 
 	setopstate->table_filled = true;
 	/* Initialize to walk the hash table */
-	UpdateTupleHashTableStats(setopstate->hashtable, false);
+	UpdateTupleHashTableStats(setopstate->instrument,
+			setopstate->hashtable->hashtab);
 	ResetTupleHashIterator(setopstate->hashtable, &setopstate->hashiter);
 }
 
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 22c32612ba..03533538f6 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -505,6 +505,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	if (node->hashtable)
 		ResetTupleHashTable(node->hashtable);
 	else
+	{
 		node->hashtable = BuildTupleHashTableExt(node->parent,
 												 node->descRight,
 												 ncols,
@@ -518,6 +519,8 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 												 node->hashtablecxt,
 												 node->hashtempcxt,
 												 false);
+		InitTupleHashTableStats(node->instrument, node->hashtable->hashtab, 0);
+	}
 
 	if (!subplan->unknownEqFalse)
 	{
@@ -533,6 +536,7 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 		if (node->hashnulls)
 			ResetTupleHashTable(node->hashnulls);
 		else
+		{
 			node->hashnulls = BuildTupleHashTableExt(node->parent,
 													 node->descRight,
 													 ncols,
@@ -546,6 +550,8 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 													 node->hashtablecxt,
 													 node->hashtempcxt,
 													 false);
+			InitTupleHashTableStats(node->instrument_nulls, node->hashnulls->hashtab, 0);
+		}
 	}
 	else
 		node->hashnulls = NULL;
@@ -621,9 +627,9 @@ buildSubPlanHash(SubPlanState *node, ExprContext *econtext)
 	ExecClearTuple(node->projRight->pi_state.resultslot);
 
 	MemoryContextSwitchTo(oldcontext);
-	UpdateTupleHashTableStats(node->hashtable, false);
+	UpdateTupleHashTableStats(node->instrument, node->hashtable->hashtab);
 	if (node->hashnulls)
-		UpdateTupleHashTableStats(node->hashnulls, false);
+		UpdateTupleHashTableStats(node->instrument_nulls, node->hashnulls->hashtab);
 }
 
 /*
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 34199b57e6..81fdfa4add 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -150,7 +150,6 @@ extern TupleHashEntry FindTupleHashEntry(TupleHashTable hashtable,
 										 ExprState *eqcomp,
 										 FmgrInfo *hashfunctions);
 extern void ResetTupleHashTable(TupleHashTable hashtable);
-extern void UpdateTupleHashTableStats(TupleHashTable hashtable, bool initial);
 
 /*
  * prototypes from functions in execJunk.c
diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h
index 264916f9a9..2072c18b54 100644
--- a/src/include/executor/nodeAgg.h
+++ b/src/include/executor/nodeAgg.h
@@ -302,6 +302,7 @@ typedef struct AggStatePerHashData
 	AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */
 	AttrNumber *hashGrpColIdxHash;	/* indices in hash table tuples */
 	Agg		   *aggnode;		/* original Agg node, for numGroups etc. */
+	HashTableInstrumentation instrument;	/* per-hashtable stats shown by EXPLAIN */
 }			AggStatePerHashData;
 
 
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index b1e2d1f1ae..11fe866c1c 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -691,8 +691,25 @@ typedef struct TupleHashEntryData
 #define SH_DECLARE
 #include "lib/simplehash.h"
 
+#define InitTupleHashTableStats(instr, htable, addsize) \
+	do { \
+		(instr).entrysize = sizeof(MinimalTuple) + (addsize); \
+		(instr).nbuckets = (htable)->size; \
+		(instr).nbuckets_original = (htable)->size; \
+		(instr).space_peak_hash = (htable)->size * sizeof(TupleHashEntryData); \
+		(instr).space_peak_tuples = 0; \
+	} while (0)
+
+#define UpdateTupleHashTableStats(instr, htable) \
+	do { \
+		(instr).nbuckets = (htable)->size; \
+		(instr).space_peak_hash = Max((instr).space_peak_hash, (htable)->size * sizeof(TupleHashEntryData)); \
+		(instr).space_peak_tuples = Max((instr).space_peak_tuples, (htable)->members * (instr).entrysize); \
+	} while (0)
+
 typedef struct HashTableInstrumentation
 {
+	size_t	entrysize;				/* estimated size of one entry, including additionalsize */
 	size_t	nbuckets;				/* number of buckets at end of execution */
 	size_t	nbuckets_original;		/* planned number of buckets */
 	size_t	space_peak_hash;	/* peak memory usage in bytes */
@@ -717,7 +734,6 @@ typedef struct TupleHashTableData
 	ExprState  *cur_eq_func;	/* comparator for input vs. table */
 	uint32		hash_iv;		/* hash-function IV */
 	ExprContext *exprcontext;	/* expression context */
-	HashTableInstrumentation instrument;
 }			TupleHashTableData;
 
 typedef tuplehash_iterator TupleHashIterator;
@@ -883,6 +899,8 @@ typedef struct SubPlanState
 	FmgrInfo   *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
 	FmgrInfo   *cur_eq_funcs;	/* equality functions for LHS vs. table */
 	ExprState  *cur_eq_comp;	/* equality comparator for LHS vs. table */
+	HashTableInstrumentation instrument;	/* instrumentation for main hashtable */
+	HashTableInstrumentation instrument_nulls; /* instrumentation for nulls hashtable */
 } SubPlanState;
 
 /* ----------------
@@ -1291,6 +1309,7 @@ typedef struct RecursiveUnionState
 	MemoryContext tempContext;	/* short-term context for comparisons */
 	TupleHashTable hashtable;	/* hash table for tuples already seen */
 	MemoryContext tableContext; /* memory context containing hash table */
+	HashTableInstrumentation instrument;	/* stats on hashtable of seen tuples */
 } RecursiveUnionState;
 
 /* ----------------
@@ -1609,7 +1628,7 @@ typedef struct BitmapHeapScanState
 	TBMSharedIterator *shared_tbmiterator;
 	TBMSharedIterator *shared_prefetch_iterator;
 	ParallelBitmapHeapState *pstate;
-	HashTableInstrumentation	instrument;
+	HashTableInstrumentation instrument;
 } BitmapHeapScanState;
 
 /* ----------------
@@ -2324,6 +2343,7 @@ typedef struct SetOpState
 	MemoryContext tableContext; /* memory context containing hash table */
 	bool		table_filled;	/* hash table filled yet? */
 	TupleHashIterator hashiter; /* for iterating through hash table */
+	HashTableInstrumentation instrument;	/* stats on the grouping hashtable */
 } SetOpState;
 
 /* ----------------
-- 
2.17.0
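
For reviewers who want to poke at the bookkeeping in isolation, here is a
minimal standalone sketch of the Init/Update pattern the two macros above
implement.  This is a toy, not postgres code: ToyHash stands in for the
simplehash-generated table (only its size/members fields matter here),
TOY_BUCKET_SIZE and all the counts are invented, and sizeof(void *) plays
the role of sizeof(MinimalTuple), which is likewise a pointer type.

#include <stddef.h>
#include <stdio.h>

#define Max(a, b) ((a) > (b) ? (a) : (b))

/* stand-in for the two simplehash fields the macros read */
typedef struct ToyHash
{
    size_t size;                /* bucket count */
    size_t members;             /* occupied entries */
} ToyHash;

/* mirrors HashTableInstrumentation */
typedef struct ToyStats
{
    size_t entrysize;
    size_t nbuckets;
    size_t nbuckets_original;
    size_t space_peak_hash;
    size_t space_peak_tuples;
} ToyStats;

#define TOY_BUCKET_SIZE 24      /* pretend sizeof(TupleHashEntryData) */

#define InitToyStats(instr, ht, addsize) \
    do { \
        (instr).entrysize = sizeof(void *) + (addsize); \
        (instr).nbuckets = (ht).size; \
        (instr).nbuckets_original = (ht).size; \
        (instr).space_peak_hash = (ht).size * TOY_BUCKET_SIZE; \
        (instr).space_peak_tuples = 0; \
    } while (0)

#define UpdateToyStats(instr, ht) \
    do { \
        (instr).nbuckets = (ht).size; \
        (instr).space_peak_hash = Max((instr).space_peak_hash, \
                                      (ht).size * TOY_BUCKET_SIZE); \
        (instr).space_peak_tuples = Max((instr).space_peak_tuples, \
                                        (ht).members * (instr).entrysize); \
    } while (0)

int
main(void)
{
    ToyHash     ht = {256, 0};  /* "built" with 256 buckets, empty */
    ToyStats    st;

    InitToyStats(st, ht, 32);   /* 32 bytes of additionalsize */

    /* the table grows while being filled ... */
    ht.size = 4096;
    ht.members = 3000;

    /* ... and the caller captures the peaks before any reset of the table */
    UpdateToyStats(st, ht);

    printf("Buckets: %zu (originally %zu)\n",
           st.nbuckets, st.nbuckets_original);
    printf("peak hash %zu bytes, peak tuples %zu bytes\n",
           st.space_peak_hash, st.space_peak_tuples);
    return 0;
}

The Max() dance exists because ResetTupleHashTable() discards the table's
contents: as the call sites above show, each node updates its stats just
before resetting or iterating the table (and once more at end of
execution), so the high-water marks survive to EXPLAIN time.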

From 94597b3250353c6f6915235026ded3e1c2bde24b Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sat, 15 Feb 2020 17:19:21 -0600
Subject: [PATCH v6 6/7] TupleHashTable.entrysize was unused except for
 instrumentation..

---
 src/backend/executor/execGrouping.c | 1 -
 src/include/nodes/execnodes.h       | 1 -
 2 files changed, 2 deletions(-)

diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 009d27b9a8..98fd4bf8bd 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -183,7 +183,6 @@ BuildTupleHashTableExt(PlanState *parent,
 	hashtable->tab_collations = collations;
 	hashtable->tablecxt = tablecxt;
 	hashtable->tempcxt = tempcxt;
-	hashtable->entrysize = entrysize;
 	hashtable->tableslot = NULL;	/* will be made on first lookup */
 	hashtable->inputslot = NULL;
 	hashtable->in_hash_funcs = NULL;
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 11fe866c1c..3a335d87a0 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -726,7 +726,6 @@ typedef struct TupleHashTableData
 	Oid		   *tab_collations; /* collations for hash and comparison */
 	MemoryContext tablecxt;		/* memory context containing table */
 	MemoryContext tempcxt;		/* context for function evaluations */
-	Size		entrysize;		/* actual size to make each hash entry */
 	TupleTableSlot *tableslot;	/* slot for referencing table entries */
 	/* The following fields are set transiently for each table search: */
 	TupleTableSlot *inputslot;	/* current input tuple's slot */
-- 
2.17.0
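
One subtlety worth noting alongside this removal: the field's only reader
was the instrumentation (per the subject above), and since the stats moved
into per-node structs the value is derived once at init time instead.  A
rough sketch of that derivation, with placeholder typedefs (in the real
tree MinimalTuple comes from access/htup.h) and a hypothetical helper name:

#include <stddef.h>

/* MinimalTuple is a pointer typedef, so sizeof(MinimalTuple) below is
 * pointer-sized -- a floor for the per-entry tuple footprint, not the
 * width of the stored tuple itself */
typedef struct MinimalTupleData MinimalTupleData;
typedef MinimalTupleData *MinimalTuple;

/* what InitTupleHashTableStats() records in place of the removed field */
static inline size_t
instrumented_entrysize(size_t additionalsize)
{
    return sizeof(MinimalTuple) + additionalsize;
}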

From f65ce612edc9545188c8531637980420b101f94f Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sat, 15 Feb 2020 15:53:34 -0600
Subject: [PATCH v6 7/7] Update comment obsolete since 69c3936a

---
 src/backend/executor/nodeAgg.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 3d60f7a314..567d65bb6c 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1538,8 +1538,7 @@ lookup_hash_entry(AggState *aggstate, uint32 hash)
 }
 
 /*
- * Look up hash entries for the current tuple in all hashed grouping sets,
- * returning an array of pergroup pointers suitable for advance_aggregates.
+ * Look up hash entries for the current tuple in all hashed grouping sets.
  *
  * Be aware that lookup_hash_entry can reset the tmpcontext.
  */
-- 
2.17.0
