diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 64eec91f8b..04019fefbe 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -43,6 +43,13 @@
 #include "utils/syscache.h"
 
 
+/*
+ * If repartitioning a batch sends more than this fraction of the tuples
+ * to either child batch, then assume that further repartitioning is unlikely
+ * to be useful.
+ */
+#define EXTREME_SKEW_LIMIT ((double) 0.95)
+
 static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
 static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
 static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
@@ -1030,14 +1037,15 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 #endif
 
 	/*
-	 * If we dumped out either all or none of the tuples in the table, disable
+	 * If we dumped out almost all or almost none of the tuples in the table,
 	 * further expansion of nbatch.  This situation implies that we have
 	 * enough tuples of identical hashvalues to overflow spaceAllowed.
 	 * Increasing nbatch will not fix it since there's no way to subdivide the
 	 * group any more finely. We have to just gut it out and hope the server
 	 * has enough RAM.
 	 */
-	if (nfreed == 0 || nfreed == ninmemory)
+	if ((double) nfreed < (ninmemory * (1 - EXTREME_SKEW_LIMIT)) ||
+		(double) nfreed > (ninmemory * EXTREME_SKEW_LIMIT))
 	{
 		hashtable->growEnabled = false;
 #ifdef HJDEBUG
