From ab7f942056dfad1875d7b5f012f53ed94e5c9e9b Mon Sep 17 00:00:00 2001
From: Peter Geoghegan <pg@bowt.ie>
Date: Thu, 2 Jul 2020 16:50:49 -0700
Subject: [PATCH] Add a GUC that limits memory use for hash tables.
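
Add a hash_mem GUC that is used in place of work_mem wherever the
planner or executor limits the memory footprint of a hash table:
hash join, hash aggregation, hashed subplans, hashed setops, and
hashed grouping-set/DISTINCT paths.  Sort-based operations remain
governed by work_mem.  Related symbols are renamed to match
(SKEW_WORK_MEM_PERCENT becomes SKEW_HASH_MEM_PERCENT,
try_combined_work_mem becomes try_combined_hash_mem).

The new GUC is PGC_USERSET, defaults to 16MB, has a 64kB minimum,
and uses the same kilobyte units as work_mem (GUC_UNIT_KB).  A
session that wants to keep sorts small while letting hash tables
stay in memory might do something like the following (the values
shown are purely illustrative, not recommendations):

    SET work_mem = '4MB';
    SET hash_mem = '256MB';

RI_Initial_Check() and RI_PartitionRemove_Check() now set hash_mem
alongside work_mem for the duration of their internal check queries.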

---
 src/include/executor/hashjoin.h               |  4 +-
 src/include/executor/nodeHash.h               |  2 +-
 src/include/miscadmin.h                       |  1 +
 src/backend/executor/execGrouping.c           |  4 +-
 src/backend/executor/nodeAgg.c                | 28 +++++------
 src/backend/executor/nodeHash.c               | 40 ++++++++--------
 src/backend/executor/nodeHashjoin.c           |  4 +-
 src/backend/optimizer/path/costsize.c         | 10 ++--
 src/backend/optimizer/plan/planner.c          | 35 +++++++-------
 src/backend/optimizer/plan/subselect.c        |  8 ++--
 src/backend/optimizer/prep/prepunion.c        |  8 ++--
 src/backend/optimizer/util/pathnode.c         |  2 +-
 src/backend/utils/adt/ri_triggers.c           |  8 +++-
 src/backend/utils/init/globals.c              |  1 +
 src/backend/utils/misc/guc.c                  | 15 +++++-
 src/backend/utils/misc/postgresql.conf.sample |  1 +
 src/test/regress/expected/groupingsets.out    |  8 +++-
 src/test/regress/expected/join_hash.out       | 48 +++++++++----------
 src/test/regress/sql/groupingsets.sql         |  8 +++-
 src/test/regress/sql/join_hash.sql            | 48 +++++++++----------
 20 files changed, 158 insertions(+), 125 deletions(-)

diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 79b634e8ed..eb5daba36b 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -88,7 +88,7 @@ typedef struct HashJoinTupleData
  * outer relation tuples with these hash values are matched against that
  * table instead of the main one.  Thus, tuples with these hash values are
  * effectively handled as part of the first batch and will never go to disk.
- * The skew hashtable is limited to SKEW_WORK_MEM_PERCENT of the total memory
+ * The skew hashtable is limited to SKEW_HASH_MEM_PERCENT of the total memory
  * allowed for the join; while building the hashtables, we decrease the number
  * of MCVs being specially treated if needed to stay under this limit.
  *
@@ -107,7 +107,7 @@ typedef struct HashSkewBucket
 
 #define SKEW_BUCKET_OVERHEAD  MAXALIGN(sizeof(HashSkewBucket))
 #define INVALID_SKEW_BUCKET_NO	(-1)
-#define SKEW_WORK_MEM_PERCENT  2
+#define SKEW_HASH_MEM_PERCENT  2
 #define SKEW_MIN_OUTER_FRACTION  0.01
 
 /*
diff --git a/src/include/executor/nodeHash.h b/src/include/executor/nodeHash.h
index 64d2ce693c..2db4e2f672 100644
--- a/src/include/executor/nodeHash.h
+++ b/src/include/executor/nodeHash.h
@@ -61,7 +61,7 @@ extern bool ExecScanHashTableForUnmatched(HashJoinState *hjstate,
 extern void ExecHashTableReset(HashJoinTable hashtable);
 extern void ExecHashTableResetMatchFlags(HashJoinTable hashtable);
 extern void ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
-									bool try_combined_work_mem,
+									bool try_combined_hash_mem,
 									int parallel_workers,
 									size_t *space_allowed,
 									int *numbuckets,
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 18bc8a7b90..f725ac788e 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -243,6 +243,7 @@ extern PGDLLIMPORT int IntervalStyle;
 extern bool enableFsync;
 extern PGDLLIMPORT bool allowSystemTableMods;
 extern PGDLLIMPORT int work_mem;
+extern PGDLLIMPORT int hash_mem;
 extern PGDLLIMPORT int maintenance_work_mem;
 extern PGDLLIMPORT int max_parallel_maintenance_workers;
 
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 8be36ca763..cc09a40223 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -170,8 +170,8 @@ BuildTupleHashTableExt(PlanState *parent,
 
 	Assert(nbuckets > 0);
 
-	/* Limit initial table size request to not more than work_mem */
-	nbuckets = Min(nbuckets, (long) ((work_mem * 1024L) / entrysize));
+	/* Limit initial table size request to not more than hash_mem */
+	nbuckets = Min(nbuckets, (long) ((hash_mem * 1024L) / entrysize));
 
 	oldcontext = MemoryContextSwitchTo(metacxt);
 
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index a20554ae65..647dc4d91c 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -203,7 +203,7 @@
  *	  entries (and initialize new transition states), we instead spill them to
  *	  disk to be processed later. The tuples are spilled in a partitioned
  *	  manner, so that subsequent batches are smaller and less likely to exceed
- *	  work_mem (if a batch does exceed work_mem, it must be spilled
+ *	  hash_mem (if a batch does exceed hash_mem, it must be spilled
  *	  recursively).
  *
  *	  Spilled data is written to logical tapes. These provide better control
@@ -212,7 +212,7 @@
  *
  *	  Note that it's possible for transition states to start small but then
  *	  grow very large; for instance in the case of ARRAY_AGG. In such cases,
- *	  it's still possible to significantly exceed work_mem. We try to avoid
+ *	  it's still possible to significantly exceed hash_mem. We try to avoid
  *	  this situation by estimating what will fit in the available memory, and
  *	  imposing a limit on the number of groups separately from the amount of
  *	  memory consumed.
@@ -1482,7 +1482,7 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
 
 	/*
 	 * Used to make sure initial hash table allocation does not exceed
-	 * work_mem. Note that the estimate does not include space for
+	 * hash_mem. Note that the estimate does not include space for
 	 * pass-by-reference transition data values, nor for the representative
 	 * tuple of each group.
 	 */
@@ -1734,7 +1734,7 @@ hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
 }
 
 /*
- * Set limits that trigger spilling to avoid exceeding work_mem. Consider the
+ * Set limits that trigger spilling to avoid exceeding hash_mem. Consider the
  * number of partitions we expect to create (if we do spill).
  *
  * There are two limits: a memory limit, and also an ngroups limit. The
@@ -1749,12 +1749,12 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
 	int			npartitions;
 	Size		partition_mem;
 
-	/* if not expected to spill, use all of work_mem */
-	if (input_groups * hashentrysize < work_mem * 1024L)
+	/* if not expected to spill, use all of hash_mem */
+	if (input_groups * hashentrysize < hash_mem * 1024L)
 	{
 		if (num_partitions != NULL)
 			*num_partitions = 0;
-		*mem_limit = work_mem * 1024L;
+		*mem_limit = hash_mem * 1024L;
 		*ngroups_limit = *mem_limit / hashentrysize;
 		return;
 	}
@@ -1776,14 +1776,14 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
 		HASHAGG_WRITE_BUFFER_SIZE * npartitions;
 
 	/*
-	 * Don't set the limit below 3/4 of work_mem. In that case, we are at the
+	 * Don't set the limit below 3/4 of hash_mem. In that case, we are at the
 	 * minimum number of partitions, so we aren't going to dramatically exceed
 	 * work mem anyway.
 	 */
-	if (work_mem * 1024L > 4 * partition_mem)
-		*mem_limit = work_mem * 1024L - partition_mem;
+	if (hash_mem * 1024L > 4 * partition_mem)
+		*mem_limit = hash_mem * 1024L - partition_mem;
 	else
-		*mem_limit = work_mem * 1024L * 0.75;
+		*mem_limit = hash_mem * 1024L * 0.75;
 
 	if (*mem_limit > hashentrysize)
 		*ngroups_limit = *mem_limit / hashentrysize;
@@ -1944,16 +1944,16 @@ hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
 
 	/*
 	 * Avoid creating so many partitions that the memory requirements of the
-	 * open partition files are greater than 1/4 of work_mem.
+	 * open partition files are greater than 1/4 of hash_mem.
 	 */
 	partition_limit =
-		(work_mem * 1024L * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
+		(hash_mem * 1024L * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
 		HASHAGG_WRITE_BUFFER_SIZE;
 
 	mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
 
 	/* make enough partitions so that each one is likely to fit in memory */
-	npartitions = 1 + (mem_wanted / (work_mem * 1024L));
+	npartitions = 1 + (mem_wanted / (hash_mem * 1024L));
 
 	if (npartitions > partition_limit)
 		npartitions = partition_limit;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 45b342011f..699fc7b57a 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -506,7 +506,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
 	hashtable->spaceAllowed = space_allowed;
 	hashtable->spaceUsedSkew = 0;
 	hashtable->spaceAllowedSkew =
-		hashtable->spaceAllowed * SKEW_WORK_MEM_PERCENT / 100;
+		hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
 	hashtable->chunks = NULL;
 	hashtable->current_chunk = NULL;
 	hashtable->parallel_state = state->parallel_state;
@@ -665,7 +665,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
 
 void
 ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
-						bool try_combined_work_mem,
+						bool try_combined_hash_mem,
 						int parallel_workers,
 						size_t *space_allowed,
 						int *numbuckets,
@@ -698,16 +698,16 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	inner_rel_bytes = ntuples * tupsize;
 
 	/*
-	 * Target in-memory hashtable size is work_mem kilobytes.
+	 * Target in-memory hashtable size is hash_mem kilobytes.
 	 */
-	hash_table_bytes = work_mem * 1024L;
+	hash_table_bytes = hash_mem * 1024L;
 
 	/*
-	 * Parallel Hash tries to use the combined work_mem of all workers to
-	 * avoid the need to batch.  If that won't work, it falls back to work_mem
+	 * Parallel Hash tries to use the combined hash_mem of all workers to
+	 * avoid the need to batch.  If that won't work, it falls back to hash_mem
 	 * per worker and tries to process batches in parallel.
 	 */
-	if (try_combined_work_mem)
+	if (try_combined_hash_mem)
 		hash_table_bytes += hash_table_bytes * parallel_workers;
 
 	*space_allowed = hash_table_bytes;
@@ -728,7 +728,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	 */
 	if (useskew)
 	{
-		skew_table_bytes = hash_table_bytes * SKEW_WORK_MEM_PERCENT / 100;
+		skew_table_bytes = hash_table_bytes * SKEW_HASH_MEM_PERCENT / 100;
 
 		/*----------
 		 * Divisor is:
@@ -751,7 +751,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 	/*
 	 * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
 	 * memory is filled, assuming a single batch; but limit the value so that
-	 * the pointer arrays we'll try to allocate do not exceed work_mem nor
+	 * the pointer arrays we'll try to allocate do not exceed hash_mem nor
 	 * MaxAllocSize.
 	 *
 	 * Note that both nbuckets and nbatch must be powers of 2 to make
@@ -790,10 +790,10 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		long		bucket_size;
 
 		/*
-		 * If Parallel Hash with combined work_mem would still need multiple
-		 * batches, we'll have to fall back to regular work_mem budget.
+		 * If Parallel Hash with combined hash_mem would still need multiple
+		 * batches, we'll have to fall back to regular hash_mem budget.
 		 */
-		if (try_combined_work_mem)
+		if (try_combined_hash_mem)
 		{
 			ExecChooseHashTableSize(ntuples, tupwidth, useskew,
 									false, parallel_workers,
@@ -805,7 +805,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		}
 
 		/*
-		 * Estimate the number of buckets we'll want to have when work_mem is
+		 * Estimate the number of buckets we'll want to have when hash_mem is
 		 * entirely full.  Each bucket will contain a bucket pointer plus
 		 * NTUP_PER_BUCKET tuples, whose projected size already includes
 		 * overhead for the hash code, pointer to the next tuple, etc.
@@ -820,8 +820,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
 		/*
 		 * Buckets are simple pointers to hashjoin tuples, while tupsize
 		 * includes the pointer, hash code, and MinimalTupleData.  So buckets
-		 * should never really exceed 25% of work_mem (even for
-		 * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
+		 * should never really exceed 25% of hash_mem (even for
+		 * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
 		 * 2^N bytes, where we might get more because of doubling. So let's
 		 * look for 50% here.
 		 */
@@ -1098,13 +1098,13 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 					/*
 					 * We are going from single-batch to multi-batch.  We need
 					 * to switch from one large combined memory budget to the
-					 * regular work_mem budget.
+					 * regular hash_mem budget.
 					 */
-					pstate->space_allowed = work_mem * 1024L;
+					pstate->space_allowed = hash_mem * 1024L;
 
 					/*
-					 * The combined work_mem of all participants wasn't
-					 * enough. Therefore one batch per participant would be
+					 * The combined hash_mem of all participants wasn't
+					 * enough.  Therefore one batch per participant would be
 					 * approximately equivalent and would probably also be
 					 * insufficient.  So try two batches per participant,
 					 * rounded up to a power of two.
@@ -2855,7 +2855,7 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
 
 		/*
 		 * Check if our space limit would be exceeded.  To avoid choking on
-		 * very large tuples or very low work_mem setting, we'll always allow
+		 * very large tuples or very low hash_mem setting, we'll always allow
 		 * each backend to allocate at least one chunk.
 		 */
 		if (hashtable->batches[0].at_least_one_chunk &&
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 9bb23fef1a..5532b91a71 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -89,9 +89,9 @@
  * PHJ_BUILD_HASHING_INNER so we can skip loading.
  *
  * Initially we try to plan for a single-batch hash join using the combined
- * work_mem of all participants to create a large shared hash table.  If that
+ * hash_mem of all participants to create a large shared hash table.  If that
  * turns out either at planning or execution time to be impossible then we
- * fall back to regular work_mem sized hash tables.
+ * fall back to regular hash_mem sized hash tables.
  *
  * To avoid deadlocks, we never wait for any barrier unless it is known that
  * all other backends attached to it are actively executing the node or have
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 4ff3c7a2fd..d257db569c 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -3526,7 +3526,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	 * Get hash table size that executor would use for inner relation.
 	 *
 	 * XXX for the moment, always assume that skew optimization will be
-	 * performed.  As long as SKEW_WORK_MEM_PERCENT is small, it's not worth
+	 * performed.  As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
 	 * trying to determine that for sure.
 	 *
 	 * XXX at some point it might be interesting to try to account for skew
@@ -3535,7 +3535,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	ExecChooseHashTableSize(inner_path_rows_total,
 							inner_path->pathtarget->width,
 							true,	/* useskew */
-							parallel_hash,	/* try_combined_work_mem */
+							parallel_hash,	/* try_combined_hash_mem */
 							outer_path->parallel_workers,
 							&space_allowed,
 							&numbuckets,
@@ -3716,16 +3716,16 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
 	}
 
 	/*
-	 * If the bucket holding the inner MCV would exceed work_mem, we don't
+	 * If the bucket holding the inner MCV would exceed hash_mem, we don't
 	 * want to hash unless there is really no other alternative, so apply
 	 * disable_cost.  (The executor normally copes with excessive memory usage
 	 * by splitting batches, but obviously it cannot separate equal values
-	 * that way, so it will be unable to drive the batch size below work_mem
+	 * that way, so it will be unable to drive the batch size below hash_mem
 	 * when this is true.)
 	 */
 	if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
 						   inner_path->pathtarget->width) >
-		(work_mem * 1024L))
+		(hash_mem * 1024L))
 		startup_cost += disable_cost;
 
 	/*
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 4131019fc9..90a38bc261 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -4201,11 +4201,11 @@ consider_groupingsets_paths(PlannerInfo *root,
 	 * If we're not being offered sorted input, then only consider plans that
 	 * can be done entirely by hashing.
 	 *
-	 * We can hash everything if it looks like it'll fit in work_mem. But if
+	 * We can hash everything if it looks like it'll fit in hash_mem. But if
 	 * the input is actually sorted despite not being advertised as such, we
 	 * prefer to make use of that in order to use less memory.
 	 *
-	 * If none of the grouping sets are sortable, then ignore the work_mem
+	 * If none of the grouping sets are sortable, then ignore the hash_mem
 	 * limit and generate a path anyway, since otherwise we'll just fail.
 	 */
 	if (!is_sorted)
@@ -4257,10 +4257,10 @@ consider_groupingsets_paths(PlannerInfo *root,
 
 		/*
 		 * gd->rollups is empty if we have only unsortable columns to work
-		 * with.  Override work_mem in that case; otherwise, we'll rely on the
+		 * with.  Override hash_mem in that case; otherwise, we'll rely on the
 		 * sorted-input case to generate usable mixed paths.
 		 */
-		if (hashsize > work_mem * 1024L && gd->rollups)
+		if (hashsize > hash_mem * 1024L && gd->rollups)
 			return;				/* nope, won't fit */
 
 		/*
@@ -4374,12 +4374,15 @@ consider_groupingsets_paths(PlannerInfo *root,
 	 *
 	 * can_hash is passed in as false if some obstacle elsewhere (such as
 	 * ordered aggs) means that we shouldn't consider hashing at all.
+	 *
+	 * XXX: Does respecting hash_mem rather than work_mem here violate any
+	 * existing assumptions?
 	 */
 	if (can_hash && gd->any_hashable)
 	{
 		List	   *rollups = NIL;
 		List	   *hash_sets = list_copy(gd->unsortable_sets);
-		double		availspace = (work_mem * 1024.0);
+		double		availspace = (hash_mem * 1024.0);
 		ListCell   *lc;
 
 		/*
@@ -4400,7 +4403,7 @@ consider_groupingsets_paths(PlannerInfo *root,
 
 			/*
 			 * We treat this as a knapsack problem: the knapsack capacity
-			 * represents work_mem, the item weights are the estimated memory
+			 * represents hash_mem, the item weights are the estimated memory
 			 * usage of the hashtables needed to implement a single rollup,
 			 * and we really ought to use the cost saving as the item value;
 			 * however, currently the costs assigned to sort nodes don't
@@ -4441,7 +4444,7 @@ consider_groupingsets_paths(PlannerInfo *root,
 																rollup->numGroups);
 
 					/*
-					 * If sz is enormous, but work_mem (and hence scale) is
+					 * If sz is enormous, but hash_mem (and hence scale) is
 					 * small, avoid integer overflow here.
 					 */
 					k_weights[i] = (int) Min(floor(sz / scale),
@@ -4854,7 +4857,7 @@ create_distinct_paths(PlannerInfo *root,
 	 * should prevent selection of hashing: if the query uses DISTINCT ON
 	 * (because it won't really have the expected behavior if we hash), or if
 	 * enable_hashagg is off, or if it looks like the hashtable will exceed
-	 * work_mem.
+	 * hash_mem.
 	 *
 	 * Note: grouping_is_hashable() is much more expensive to check than the
 	 * other gating conditions, so we want to do it last.
@@ -4868,7 +4871,7 @@ create_distinct_paths(PlannerInfo *root,
 		Size		hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0);
 
 		allow_hash = !hashagg_avoid_disk_plan ||
-			(hashentrysize * numDistinctRows <= work_mem * 1024L);
+			(hashentrysize * numDistinctRows <= hash_mem * 1024L);
 	}
 
 	if (allow_hash && grouping_is_hashable(parse->distinctClause))
@@ -6768,12 +6771,12 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 
 			/*
 			 * Provided that the estimated size of the hashtable does not
-			 * exceed work_mem, we'll generate a HashAgg Path, although if we
+			 * exceed hash_mem, we'll generate a HashAgg Path, although if we
 			 * were unable to sort above, then we'd better generate a Path, so
 			 * that we at least have one.
 			 */
 			if (!hashagg_avoid_disk_plan ||
-				hashaggtablesize < work_mem * 1024L ||
+				hashaggtablesize < hash_mem * 1024L ||
 				grouped_rel->pathlist == NIL)
 			{
 				/*
@@ -6796,7 +6799,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 		/*
 		 * Generate a Finalize HashAgg Path atop of the cheapest partially
 		 * grouped path, assuming there is one. Once again, we'll only do this
-		 * if it looks as though the hash table won't exceed work_mem.
+		 * if it looks as though the hash table won't exceed hash_mem.
 		 */
 		if (partially_grouped_rel && partially_grouped_rel->pathlist)
 		{
@@ -6807,7 +6810,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 														  dNumGroups);
 
 			if (!hashagg_avoid_disk_plan ||
-				hashaggtablesize < work_mem * 1024L)
+				hashaggtablesize < hash_mem * 1024L)
 				add_path(grouped_rel, (Path *)
 						 create_agg_path(root,
 										 grouped_rel,
@@ -7185,9 +7188,9 @@ create_partial_grouping_paths(PlannerInfo *root,
 
 		/*
 		 * Tentatively produce a partial HashAgg Path, depending on if it
-		 * looks as if the hash table will fit in work_mem.
+		 * looks as if the hash table will fit in hash_mem.
 		 */
-		if ((!hashagg_avoid_disk_plan || hashaggtablesize < work_mem * 1024L) &&
+		if ((!hashagg_avoid_disk_plan || hashaggtablesize < hash_mem * 1024L) &&
 			cheapest_total_path != NULL)
 		{
 			add_path(partially_grouped_rel, (Path *)
@@ -7215,7 +7218,7 @@ create_partial_grouping_paths(PlannerInfo *root,
 
 		/* Do the same for partial paths. */
 		if ((!hashagg_avoid_disk_plan ||
-			 hashaggtablesize < work_mem * 1024L) &&
+			 hashaggtablesize < hash_mem * 1024L) &&
 			cheapest_partial_path != NULL)
 		{
 			add_partial_path(partially_grouped_rel, (Path *)
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index b02fcb9bfe..5860bb212a 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -200,7 +200,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery,
 	 * XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash
 	 * its output.  In that case it would've been better to specify full
 	 * retrieval.  At present, however, we can only check hashability after
-	 * we've made the subplan :-(.  (Determining whether it'll fit in work_mem
+	 * we've made the subplan :-(.  (Determining whether it'll fit in hash_mem
 	 * is the really hard part.)  Therefore, we don't want to be too
 	 * optimistic about the percentage of tuples retrieved, for fear of
 	 * selecting a plan that's bad for the materialization case.
@@ -278,7 +278,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery,
 
 			plan = create_plan(subroot, best_path);
 
-			/* Now we can check if it'll fit in work_mem */
+			/* Now we can check if it'll fit in hash_mem */
 			/* XXX can we check this at the Path stage? */
 			if (subplan_is_hashable(plan))
 			{
@@ -718,14 +718,14 @@ subplan_is_hashable(Plan *plan)
 	double		subquery_size;
 
 	/*
-	 * The estimated size of the subquery result must fit in work_mem. (Note:
+	 * The estimated size of the subquery result must fit in hash_mem. (Note:
 	 * we use heap tuple overhead here even though the tuples will actually be
 	 * stored as MinimalTuples; this provides some fudge factor for hashtable
 	 * overhead.)
 	 */
 	subquery_size = plan->plan_rows *
 		(MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
-	if (subquery_size > work_mem * 1024L)
+	if (subquery_size > hash_mem * 1024L)
 		return false;
 
 	return true;
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 951aed80e7..4b2aa5b554 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -1050,15 +1050,17 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 
 	/*
 	 * Don't do it if it doesn't look like the hashtable will fit into
-	 * work_mem.
+	 * hash_mem.
 	 */
 	hashentrysize = MAXALIGN(input_path->pathtarget->width) + MAXALIGN(SizeofMinimalTupleHeader);
 
-	if (hashentrysize * dNumGroups > work_mem * 1024L)
+	if (hashentrysize * dNumGroups > hash_mem * 1024L)
 		return false;
 
 	/*
-	 * See if the estimated cost is no more than doing it the other way.
+	 * See if the estimated cost is no more than doing it the other way.  We
+	 * deliberately give hashagg more memory than sort + group here (at least
+	 * in the common case where hash_mem exceeds work_mem).
 	 *
 	 * We need to consider input_plan + hashagg versus input_plan + sort +
 	 * group.  Note that the actual result plan might involve a SetOp or
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index e845a4b1ae..4419e7c00b 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1689,7 +1689,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 		 */
 		int			hashentrysize = subpath->pathtarget->width + 64;
 
-		if (hashentrysize * pathnode->path.rows > work_mem * 1024L)
+		if (hashentrysize * pathnode->path.rows > hash_mem * 1024L)
 		{
 			/*
 			 * We should not try to hash.  Hack the SpecialJoinInfo to
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index bb49e80d16..249571112f 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -1462,6 +1462,9 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
 	(void) set_config_option("work_mem", workmembuf,
 							 PGC_USERSET, PGC_S_SESSION,
 							 GUC_ACTION_SAVE, true, 0, false);
+	(void) set_config_option("hash_mem", workmembuf,
+							 PGC_USERSET, PGC_S_SESSION,
+							 GUC_ACTION_SAVE, true, 0, false);
 
 	if (SPI_connect() != SPI_OK_CONNECT)
 		elog(ERROR, "SPI_connect failed");
@@ -1553,7 +1556,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
 		elog(ERROR, "SPI_finish failed");
 
 	/*
-	 * Restore work_mem.
+	 * Restore work_mem and hash_mem.
 	 */
 	AtEOXact_GUC(true, save_nestlevel);
 
@@ -1697,6 +1700,9 @@ RI_PartitionRemove_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
 	(void) set_config_option("work_mem", workmembuf,
 							 PGC_USERSET, PGC_S_SESSION,
 							 GUC_ACTION_SAVE, true, 0, false);
+	(void) set_config_option("hash_mem", workmembuf,
+							 PGC_USERSET, PGC_S_SESSION,
+							 GUC_ACTION_SAVE, true, 0, false);
 
 	if (SPI_connect() != SPI_OK_CONNECT)
 		elog(ERROR, "SPI_connect failed");
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 74b52b7132..5f3910c0d8 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -119,6 +119,7 @@ int			IntervalStyle = INTSTYLE_POSTGRES;
 bool		enableFsync = true;
 bool		allowSystemTableMods = false;
 int			work_mem = 4096;
+int			hash_mem = 16384;
 int			maintenance_work_mem = 65536;
 int			max_parallel_maintenance_workers = 2;
 
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 75fc6f11d6..78878b5180 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2320,8 +2320,7 @@ static struct config_int ConfigureNamesInt[] =
 		{"work_mem", PGC_USERSET, RESOURCES_MEM,
 			gettext_noop("Sets the maximum memory to be used for query workspaces."),
 			gettext_noop("This much memory can be used by each internal "
-						 "sort operation and hash table before switching to "
-						 "temporary disk files."),
+						 "sort operation before switching to temporary disk files."),
 			GUC_UNIT_KB | GUC_EXPLAIN
 		},
 		&work_mem,
@@ -2329,6 +2328,18 @@ static struct config_int ConfigureNamesInt[] =
 		NULL, NULL, NULL
 	},
 
+	{
+		{"hash_mem", PGC_USERSET, RESOURCES_MEM,
+			gettext_noop("Sets the maximum memory to be used for hash table query workspaces."),
+			gettext_noop("This much memory can be used by each hash table "
+						 "before switching to temporary disk files."),
+			GUC_UNIT_KB | GUC_EXPLAIN
+		},
+		&hash_mem,
+		16384, 64, MAX_KILOBYTES,
+		NULL, NULL, NULL
+	},
+
 	{
 		{"maintenance_work_mem", PGC_USERSET, RESOURCES_MEM,
 			gettext_noop("Sets the maximum memory to be used for maintenance operations."),
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 3a25287a39..2ec05c34a5 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -128,6 +128,7 @@
 # Caution: it is not advisable to set max_prepared_transactions nonzero unless
 # you actively intend to use prepared transactions.
 #work_mem = 4MB				# min 64kB
+#hash_mem = 16MB			# min 64kB
 #maintenance_work_mem = 64MB		# min 1MB
 #autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
 #logical_decoding_work_mem = 64MB	# min 64kB
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index 03ada654bb..0b633cbeda 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -1543,6 +1543,7 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
 -- test the knapsack
 set enable_indexscan = false;
 set work_mem = '64kB';
+set hash_mem = '64kB';
 explain (costs off)
   select unique1,
          count(two), count(four), count(ten),
@@ -1586,6 +1587,7 @@ explain (costs off)
 (9 rows)
 
 set work_mem = '384kB';
+set hash_mem = '384kB';
 explain (costs off)
   select unique1,
          count(two), count(four), count(ten),
@@ -1635,8 +1637,8 @@ select v||'a', case when grouping(v||'a') = 1 then 1 else 0 end, count(*)
 
 --
 -- Compare results between plans using sorting and plans using hash
--- aggregation. Force spilling in both cases by setting work_mem low
--- and altering the statistics.
+-- aggregation. Force spilling in both cases by setting work_mem/hash_mem
+-- low and altering the statistics.
 --
 create table gs_data_1 as
 select g%1000 as g1000, g%100 as g100, g%10 as g10, g
@@ -1645,6 +1647,7 @@ analyze gs_data_1;
 alter table gs_data_1 set (autovacuum_enabled = 'false');
 update pg_class set reltuples = 10 where relname='gs_data_1';
 SET work_mem='64kB';
+set hash_mem = '64kB';
 -- Produce results with sorting.
 set enable_hashagg = false;
 set jit_above_cost = 0;
@@ -1697,6 +1700,7 @@ select g100, g10, sum(g::numeric), count(*), max(g::text)
 from gs_data_1 group by cube (g1000, g100,g10);
 set enable_sort = true;
 set work_mem to default;
+set hash_mem to default;
 -- Compare results
 (select * from gs_hash_1 except select * from gs_group_1)
   union all
diff --git a/src/test/regress/expected/join_hash.out b/src/test/regress/expected/join_hash.out
index 3a91c144a2..616c146096 100644
--- a/src/test/regress/expected/join_hash.out
+++ b/src/test/regress/expected/join_hash.out
@@ -81,11 +81,11 @@ create table wide as select generate_series(1, 2) as id, rpad('', 320000, 'x') a
 alter table wide set (parallel_workers = 2);
 -- The "optimal" case: the hash table fits in memory; we plan for 1
 -- batch, we stick to that number, and peak memory usage stays within
--- our work_mem budget
+-- our hash_mem budget
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 explain (costs off)
   select count(*) from simple r join simple s using (id);
                QUERY PLAN               
@@ -118,7 +118,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -155,7 +155,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -191,11 +191,11 @@ $$);
 rollback to settings;
 -- The "good" case: batches required, but we plan the right number; we
 -- plan for some number of batches, and we stick to that number, and
--- peak memory usage says within our work_mem budget
+-- peak memory usage stays within our hash_mem budget
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 explain (costs off)
   select count(*) from simple r join simple s using (id);
                QUERY PLAN               
@@ -228,7 +228,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -265,7 +265,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '192kB';
+set local hash_mem = '192kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -301,12 +301,12 @@ $$);
 rollback to settings;
 -- The "bad" case: during execution we need to increase number of
 -- batches; in this case we plan for 1 batch, and increase at least a
--- couple of times, and peak memory usage stays within our work_mem
+-- couple of times, and peak memory usage stays within our hash_mem
 -- budget
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 explain (costs off)
   select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
                       QUERY PLAN                      
@@ -339,7 +339,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -376,7 +376,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '192kB';
+set local hash_mem = '192kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -411,14 +411,14 @@ $$);
 
 rollback to settings;
 -- The "ugly" case: increasing the number of batches during execution
--- doesn't help, so stop trying to fit in work_mem and hope for the
+-- doesn't help, so stop trying to fit in hash_mem and hope for the
 -- best; in this case we plan for 1 batch, increases just once and
 -- then stop increasing because that didn't help at all, so we blow
--- right through the work_mem budget and hope for the best...
+-- right through the hash_mem budget and hope for the best...
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
                     QUERY PLAN                    
@@ -450,7 +450,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
@@ -485,7 +485,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
@@ -518,11 +518,11 @@ $$);
 (1 row)
 
 rollback to settings;
--- A couple of other hash join tests unrelated to work_mem management.
+-- A couple of other hash join tests unrelated to hash_mem management.
 -- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 set local parallel_leader_participation = off;
 select * from hash_join_batches(
 $$
@@ -550,7 +550,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '64kB';
+set hash_mem = '64kB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -601,7 +601,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '4MB';
+set hash_mem = '4MB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -652,7 +652,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '64kB';
+set hash_mem = '64kB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -703,7 +703,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '4MB';
+set hash_mem = '4MB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -842,7 +842,7 @@ rollback to settings;
 savepoint settings;
 set max_parallel_workers_per_gather = 2;
 set enable_parallel_hash = on;
-set work_mem = '128kB';
+set hash_mem = '128kB';
 explain (costs off)
   select length(max(s.t))
   from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);
diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql
index e6c28743a4..a554ef2f62 100644
--- a/src/test/regress/sql/groupingsets.sql
+++ b/src/test/regress/sql/groupingsets.sql
@@ -409,6 +409,7 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
 
 set enable_indexscan = false;
 set work_mem = '64kB';
+set hash_mem = '64kB';
 explain (costs off)
   select unique1,
          count(two), count(four), count(ten),
@@ -423,6 +424,7 @@ explain (costs off)
     from tenk1 group by grouping sets (unique1,hundred,ten,four,two);
 
 set work_mem = '384kB';
+set hash_mem = '384kB';
 explain (costs off)
   select unique1,
          count(two), count(four), count(ten),
@@ -443,8 +445,8 @@ select v||'a', case when grouping(v||'a') = 1 then 1 else 0 end, count(*)
 
 --
 -- Compare results between plans using sorting and plans using hash
--- aggregation. Force spilling in both cases by setting work_mem low
--- and altering the statistics.
+-- aggregation. Force spilling in both cases by setting work_mem/hash_mem
+-- low and altering the statistics.
 --
 
 create table gs_data_1 as
@@ -456,6 +458,7 @@ alter table gs_data_1 set (autovacuum_enabled = 'false');
 update pg_class set reltuples = 10 where relname='gs_data_1';
 
 SET work_mem='64kB';
+set hash_mem = '64kB';
 
 -- Produce results with sorting.
 
@@ -485,6 +488,7 @@ from gs_data_1 group by cube (g1000, g100,g10);
 
 set enable_sort = true;
 set work_mem to default;
+set hash_mem to default;
 
 -- Compare results
 
diff --git a/src/test/regress/sql/join_hash.sql b/src/test/regress/sql/join_hash.sql
index 68c1a8c7b6..69abeb08c6 100644
--- a/src/test/regress/sql/join_hash.sql
+++ b/src/test/regress/sql/join_hash.sql
@@ -89,12 +89,12 @@ alter table wide set (parallel_workers = 2);
 
 -- The "optimal" case: the hash table fits in memory; we plan for 1
 -- batch, we stick to that number, and peak memory usage stays within
--- our work_mem budget
+-- our hash_mem budget
 
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 explain (costs off)
   select count(*) from simple r join simple s using (id);
 select count(*) from simple r join simple s using (id);
@@ -108,7 +108,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -123,7 +123,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -137,12 +137,12 @@ rollback to settings;
 
 -- The "good" case: batches required, but we plan the right number; we
 -- plan for some number of batches, and we stick to that number, and
--- peak memory usage says within our work_mem budget
+-- peak memory usage stays within our hash_mem budget
 
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 explain (costs off)
   select count(*) from simple r join simple s using (id);
 select count(*) from simple r join simple s using (id);
@@ -156,7 +156,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -171,7 +171,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '192kB';
+set local hash_mem = '192kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join simple s using (id);
@@ -185,13 +185,13 @@ rollback to settings;
 
 -- The "bad" case: during execution we need to increase number of
 -- batches; in this case we plan for 1 batch, and increase at least a
--- couple of times, and peak memory usage stays within our work_mem
+-- couple of times, and peak memory usage stays within our hash_mem
 -- budget
 
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 explain (costs off)
   select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
 select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
@@ -205,7 +205,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -220,7 +220,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '192kB';
+set local hash_mem = '192kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -233,15 +233,15 @@ $$);
 rollback to settings;
 
 -- The "ugly" case: increasing the number of batches during execution
--- doesn't help, so stop trying to fit in work_mem and hope for the
+-- doesn't help, so stop trying to fit in hash_mem and hope for the
 -- best; in this case we plan for 1 batch, increases just once and
 -- then stop increasing because that didn't help at all, so we blow
--- right through the work_mem budget and hope for the best...
+-- right through the hash_mem budget and hope for the best...
 
 -- non-parallel
 savepoint settings;
 set local max_parallel_workers_per_gather = 0;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
 select count(*) from simple r join extremely_skewed s using (id);
@@ -254,7 +254,7 @@ rollback to settings;
 -- parallel with parallel-oblivious hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = off;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
@@ -268,7 +268,7 @@ rollback to settings;
 -- parallel with parallel-aware hash join
 savepoint settings;
 set local max_parallel_workers_per_gather = 1;
-set local work_mem = '128kB';
+set local hash_mem = '128kB';
 set local enable_parallel_hash = on;
 explain (costs off)
   select count(*) from simple r join extremely_skewed s using (id);
@@ -279,12 +279,12 @@ $$
 $$);
 rollback to settings;
 
--- A couple of other hash join tests unrelated to work_mem management.
+-- A couple of other hash join tests unrelated to hash_mem management.
 
 -- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
-set local work_mem = '4MB';
+set local hash_mem = '4MB';
 set local parallel_leader_participation = off;
 select * from hash_join_batches(
 $$
@@ -310,7 +310,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '64kB';
+set hash_mem = '64kB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -337,7 +337,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '4MB';
+set hash_mem = '4MB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -364,7 +364,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '64kB';
+set hash_mem = '64kB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -391,7 +391,7 @@ set parallel_tuple_cost = 0;
 set max_parallel_workers_per_gather = 2;
 set enable_material = off;
 set enable_mergejoin = off;
-set work_mem = '4MB';
+set hash_mem = '4MB';
 explain (costs off)
   select count(*) from join_foo
     left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -453,7 +453,7 @@ rollback to settings;
 savepoint settings;
 set max_parallel_workers_per_gather = 2;
 set enable_parallel_hash = on;
-set work_mem = '128kB';
+set hash_mem = '128kB';
 explain (costs off)
   select length(max(s.t))
   from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);
-- 
2.25.1

