Alexander Pyhalov wrote on 2026-01-13 13:44:
For now I start thinking we need some form of FETCH, which stops fetching data based on batch size...

Hi.

To limit memory consumption, we actually have to retrieve less data, and that can only be done on the foreign server side. I've rewritten the third patch. We introduce a new parameter, cursor_fetch_limit, which postgres_fdw sets on the remote session. When it is set, fetching a limited number of records from a cursor is additionally bounded by the memory consumed by those records. Of course, the record size is only an estimate (for example, we don't know what the output functions will do).
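For illustration, what postgres_fdw now does on the remote session looks roughly like this (a sketch; the patch computes the limit as work_mem in bytes, capped by MaxAllocSize, and c1 stands for one of its remote cursors):

    SET cursor_fetch_limit = 4194304;  -- local work_mem of 4MB, in bytes
    FETCH 100 FROM c1;                 -- may now return fewer than 100 rows
                                       -- once their estimated size tops the limit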

This works as expected: in my tests with tables of large records, backends executing SELECTs were always restricted to about 2 GB of RAM overall (without the patch, memory consumption easily grows to 8 GB). However, when we now get fewer tuples from the executor than expected, we have to recheck whether those are all the tuples we can get. I've introduced an es_eof EState field to signal that there are no more tuples. I don't know if it's the best way.
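The cursor-level behaviour this leads to looks like this (condensed from the portals regression test added below):

    SET cursor_fetch_limit = '512B';
    BEGIN;
    DECLARE foo4 SCROLL CURSOR FOR SELECT * FROM tenk2;
    FETCH 4 in foo4;  -- only 3 rows come back: the 512-byte limit was hit
    FETCH 4 in foo4;  -- the cursor is not at EOF, the next rows follow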

--
Best regards,
Alexander Pyhalov,
Postgres Professional
From 9eca4c6277c63f4f8f28afced07ee13c65bfb36e Mon Sep 17 00:00:00 2001
From: Alexander Pyhalov <[email protected]>
Date: Tue, 16 Dec 2025 18:46:27 +0300
Subject: [PATCH 1/3] Limit batch_size for foreign insert with work_mem

The batch_size option can be set at the foreign server level, and it can
be overly optimistic even for a single table when tuple lengths vary. To
prevent excessive memory usage, limit the effective batch size by
work_mem.
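
A sketch of the effect (not part of the regression tests; the batch_size
of 100 and the 64kB work_mem are assumptions for illustration):

    ALTER SERVER loopback OPTIONS (ADD batch_size '100');
    SET work_mem TO 64;
    -- with ~65kB rows, each buffered tuple alone reaches work_mem,
    -- so batches are flushed well before 100 rows accumulate
    INSERT INTO tab_batch_sharded
      SELECT 3, lpad('a', 65*1024, 'a') FROM generate_series(1, 10) i;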
---
 src/backend/executor/nodeModifyTable.c | 15 ++++++++++++---
 src/include/nodes/execnodes.h          |  1 +
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 7d7411a7056..9495d9b04bb 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -943,12 +943,18 @@ ExecInsert(ModifyTableContext *context,
 		if (resultRelInfo->ri_BatchSize > 1)
 		{
 			bool		flushed = false;
+			Size		tuple_len;
+
+			/* Compute length of the current tuple */
+			slot_getallattrs(slot);
+			tuple_len = heap_compute_data_size(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);
 
 			/*
-			 * When we've reached the desired batch size, perform the
-			 * insertion.
+			 * When we've reached the desired batch size or exceeded work_mem,
+			 * perform the insertion.
 			 */
-			if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
+			if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize ||
+				((tuple_len + resultRelInfo->ri_BatchMemoryUsed > work_mem * 1024) && (resultRelInfo->ri_NumSlots > 0)))
 			{
 				ExecBatchInsert(mtstate, resultRelInfo,
 								resultRelInfo->ri_Slots,
@@ -1019,6 +1025,8 @@ ExecInsert(ModifyTableContext *context,
 								   resultRelInfo));
 
 			resultRelInfo->ri_NumSlots++;
+			/* A batch always holds at least one tuple, so this can exceed work_mem. */
+			resultRelInfo->ri_BatchMemoryUsed += tuple_len;
 
 			MemoryContextSwitchTo(oldContext);
 
@@ -1418,6 +1426,7 @@ ExecBatchInsert(ModifyTableState *mtstate,
 		ExecClearTuple(planSlots[i]);
 	}
 	resultRelInfo->ri_NumSlots = 0;
+	resultRelInfo->ri_BatchMemoryUsed = 0;
 }
 
 /*
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index f8053d9e572..33d3eb06891 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -544,6 +544,7 @@ typedef struct ResultRelInfo
 	int			ri_NumSlots;	/* number of slots in the array */
 	int			ri_NumSlotsInitialized; /* number of initialized slots */
 	int			ri_BatchSize;	/* max slots inserted in a single batch */
+	int			ri_BatchMemoryUsed;	/* memory used by the current batch, in bytes */
 	TupleTableSlot **ri_Slots;	/* input tuples for batch insert */
 	TupleTableSlot **ri_PlanSlots;
 
-- 
2.43.0

From b4afad03431f376c56238266f401e061c4d31db5 Mon Sep 17 00:00:00 2001
From: Alexander Pyhalov <[email protected]>
Date: Tue, 30 Dec 2025 10:35:22 +0300
Subject: [PATCH 2/3] Fall back to foreign insert when tuple is too big

When a tuple is larger than work_mem, we don't add it to a batch but
fall through to a normal foreign insert. This means the executor can
form a batch, insert it, and finish with a normal insert, so there may
be no pending batch inserts left to process. To avoid manipulating
possibly long lists, we don't clear the
es_insert_pending_result_relations and es_insert_pending_modifytables
lists, but instead check in ExecPendingInserts() whether there are any
pending tuples.
---
 .../postgres_fdw/expected/postgres_fdw.out    |  11 ++
 contrib/postgres_fdw/sql/postgres_fdw.sql     |   9 ++
 src/backend/executor/nodeModifyTable.c        | 138 ++++++++++--------
 src/include/nodes/execnodes.h                 |   1 +
 4 files changed, 96 insertions(+), 63 deletions(-)

diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 6066510c7c0..cb3044c769a 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -7640,6 +7640,17 @@ select count(*) from tab_batch_sharded;
     45
 (1 row)
 
+delete from tab_batch_sharded;
+-- test batch insert with large tuples and switching from batch to usual insert
+set work_mem to 64;
+insert into tab_batch_sharded select 3, case when i%4 = 0 then lpad('a',65*1024,'a') else 'test'|| i end from generate_series(1, 100) i;
+select count(*) from tab_batch_sharded;
+ count 
+-------
+   100
+(1 row)
+
+reset work_mem;
 drop table tab_batch_local;
 drop table tab_batch_sharded;
 drop table tab_batch_sharded_p1_remote;
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index 4f7ab2ed0ac..a084afc8de7 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -1944,10 +1944,19 @@ create foreign table tab_batch_sharded_p1 partition of tab_batch_sharded
   server loopback options (table_name 'tab_batch_sharded_p1_remote');
 insert into tab_batch_sharded select * from tab_batch_local;
 select count(*) from tab_batch_sharded;
+delete from tab_batch_sharded;
+-- test batch insert with large tuples and switching from batch to usual insert
+set work_mem to 64;
+insert into tab_batch_sharded select 3, case when i%4 = 0 then lpad('a',65*1024,'a') else 'test'|| i end from generate_series(1, 100) i;
+select count(*) from tab_batch_sharded;
+reset work_mem;
+
 drop table tab_batch_local;
 drop table tab_batch_sharded;
 drop table tab_batch_sharded_p1_remote;
 
+
+
 alter server loopback options (drop batch_size);
 
 -- ===================================================================
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 9495d9b04bb..ba3d135d6a6 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -942,7 +942,6 @@ ExecInsert(ModifyTableContext *context,
 		 */
 		if (resultRelInfo->ri_BatchSize > 1)
 		{
-			bool		flushed = false;
 			Size		tuple_len;
 
 			/* Compute length of the current tuple */
@@ -961,76 +960,82 @@ ExecInsert(ModifyTableContext *context,
 								resultRelInfo->ri_PlanSlots,
 								resultRelInfo->ri_NumSlots,
 								estate, canSetTag);
-				flushed = true;
 			}
 
-			oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
-
-			if (resultRelInfo->ri_Slots == NULL)
+			if (tuple_len < work_mem * 1024)
 			{
-				resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
-				resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
-			}
+				oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
 
-			/*
-			 * Initialize the batch slots. We don't know how many slots will
-			 * be needed, so we initialize them as the batch grows, and we
-			 * keep them across batches. To mitigate an inefficiency in how
-			 * resource owner handles objects with many references (as with
-			 * many slots all referencing the same tuple descriptor) we copy
-			 * the appropriate tuple descriptor for each slot.
-			 */
-			if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
-			{
-				TupleDesc	tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
-				TupleDesc	plan_tdesc =
-					CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+				if (resultRelInfo->ri_Slots == NULL)
+				{
+					resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
+					resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
+				}
 
-				resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
-					MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
+				/*
+				 * Initialize the batch slots. We don't know how many slots
+				 * will be needed, so we initialize them as the batch grows,
+				 * and we keep them across batches. To mitigate an
+				 * inefficiency in how resource owner handles objects with
+				 * many references (as with many slots all referencing the
+				 * same tuple descriptor) we copy the appropriate tuple
+				 * descriptor for each slot.
+				 */
+				if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
+				{
+					TupleDesc	tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
+					TupleDesc	plan_tdesc =
+						CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
 
-				resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
-					MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
+					resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
+						MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
 
-				/* remember how many batch slots we initialized */
-				resultRelInfo->ri_NumSlotsInitialized++;
-			}
+					resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
+						MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
 
-			ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
-						 slot);
+					/* remember how many batch slots we initialized */
+					resultRelInfo->ri_NumSlotsInitialized++;
+				}
 
-			ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
-						 planSlot);
+				ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
+							 slot);
 
-			/*
-			 * If these are the first tuples stored in the buffers, add the
-			 * target rel and the mtstate to the
-			 * es_insert_pending_result_relations and
-			 * es_insert_pending_modifytables lists respectively, except in
-			 * the case where flushing was done above, in which case they
-			 * would already have been added to the lists, so no need to do
-			 * this.
-			 */
-			if (resultRelInfo->ri_NumSlots == 0 && !flushed)
-			{
-				Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
-										resultRelInfo));
-				estate->es_insert_pending_result_relations =
-					lappend(estate->es_insert_pending_result_relations,
-							resultRelInfo);
-				estate->es_insert_pending_modifytables =
-					lappend(estate->es_insert_pending_modifytables, mtstate);
-			}
-			Assert(list_member_ptr(estate->es_insert_pending_result_relations,
-								   resultRelInfo));
+				ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
+							 planSlot);
 
-			resultRelInfo->ri_NumSlots++;
-			/* Batch size can grow up to tuple length even if it exceeds work_mem. */
-			resultRelInfo->ri_BatchMemoryUsed += tuple_len;
+				/*
+				 * If these are the first tuples stored in the buffers, add
+				 * the target rel and the mtstate to the
+				 * es_insert_pending_result_relations and
+				 * es_insert_pending_modifytables lists respectively, except
+				 * in the case where flushing was done above, in which case
+				 * they would already have been added to the lists, so no need
+				 * to do this.
+				 */
+				if (resultRelInfo->ri_NumSlots == 0 && !resultRelInfo->ri_ExecutorPendingStateModified)
+				{
+					Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
+											resultRelInfo));
+					estate->es_insert_pending_result_relations =
+						lappend(estate->es_insert_pending_result_relations,
+								resultRelInfo);
+					estate->es_insert_pending_modifytables =
+						lappend(estate->es_insert_pending_modifytables, mtstate);
+					resultRelInfo->ri_ExecutorPendingStateModified = true;
+				}
+				Assert(list_member_ptr(estate->es_insert_pending_result_relations,
+									   resultRelInfo));
 
-			MemoryContextSwitchTo(oldContext);
+				resultRelInfo->ri_NumSlots++;
+				resultRelInfo->ri_BatchMemoryUsed += tuple_len;
+				/* We've already flushed the batch if it grew too big. */
+				Assert(resultRelInfo->ri_BatchMemoryUsed <= work_mem * 1024);
 
-			return NULL;
+				MemoryContextSwitchTo(oldContext);
+
+				return NULL;
+			}
+			/* else fall through to a normal foreign insert */
 		}
 
 		/*
@@ -1445,11 +1450,18 @@ ExecPendingInserts(EState *estate)
 		ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
 
 		Assert(mtstate);
-		ExecBatchInsert(mtstate, resultRelInfo,
-						resultRelInfo->ri_Slots,
-						resultRelInfo->ri_PlanSlots,
-						resultRelInfo->ri_NumSlots,
-						estate, mtstate->canSetTag);
+
+		/*
+		 * Batch insert could have switched to non-batched insert, in which
+		 * case there may be no filled slots.
+		 */
+		if (resultRelInfo->ri_NumSlots > 0)
+			ExecBatchInsert(mtstate, resultRelInfo,
+							resultRelInfo->ri_Slots,
+							resultRelInfo->ri_PlanSlots,
+							resultRelInfo->ri_NumSlots,
+							estate, mtstate->canSetTag);
+		resultRelInfo->ri_ExecutorPendingStateModified = false;
 	}
 
 	list_free(estate->es_insert_pending_result_relations);
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 33d3eb06891..3220f9b2574 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -545,6 +545,7 @@ typedef struct ResultRelInfo
 	int			ri_NumSlotsInitialized; /* number of initialized slots */
 	int			ri_BatchSize;	/* max slots inserted in a single batch */
 	int			ri_BatchMemoryUsed;	/* memory used by batch */
+	bool			ri_ExecutorPendingStateModified; /* true if member of estate->es_insert_pending_result_relations */
 	TupleTableSlot **ri_Slots;	/* input tuples for batch insert */
 	TupleTableSlot **ri_PlanSlots;
 
-- 
2.43.0

From 2e0710dd4db89fbb1f02d3e73165425dee76830b Mon Sep 17 00:00:00 2001
From: Alexander Pyhalov <[email protected]>
Date: Thu, 15 Jan 2026 09:20:51 +0300
Subject: [PATCH 3/3] Introduce cursor_fetch_limit parameter and use it in
 postgres_fdw

This parameter is specified in bytes and limits the number of records
retrieved by a FETCH statement, based on their size. FETCH ALL is not
limited.

When cursor_fetch_limit >= 0 and a record count is passed to
ExecutorRun(), we can no longer rely on the fact that the executor
returning fewer records than expected means there are no more records
to fetch, so we introduce an additional EState field which, after the
ExecutorRun() call, says whether there are more records.
---
 contrib/postgres_fdw/connection.c             |  13 +++
 contrib/postgres_fdw/postgres_fdw.c           |   7 +-
 src/backend/access/common/heaptuple.c         |  54 +++++++++
 src/backend/executor/execMain.c               |  37 +++++-
 src/backend/executor/execUtils.c              |   1 +
 src/backend/tcop/pquery.c                     |  54 +++++++--
 src/backend/utils/misc/guc_parameters.dat     |   9 ++
 src/backend/utils/misc/guc_tables.c           |   1 +
 src/backend/utils/misc/postgresql.conf.sample |   1 +
 src/include/access/htup_details.h             |   3 +
 src/include/executor/executor.h               |   2 +
 src/include/nodes/execnodes.h                 |   1 +
 src/test/regress/expected/portals.out         | 105 ++++++++++++++++++
 src/test/regress/sql/portals.sql              |  28 +++++
 14 files changed, 304 insertions(+), 12 deletions(-)

diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 487a1a23170..d6c6c26b393 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -802,6 +802,19 @@ configure_remote_session(PGconn *conn)
 		do_sql_command(conn, "SET extra_float_digits = 3");
 	else
 		do_sql_command(conn, "SET extra_float_digits = 2");
+	/*
+	 * Limit the fetch size by work_mem to avoid uncontrolled memory
+	 * consumption.
+	 */
+	if (remoteversion >= 190000)
+	{
+		char sql[64];
+		int fetch_limit;
+
+		fetch_limit = ((Size) work_mem * 1024 > MaxAllocSize) ? MaxAllocSize : work_mem * 1024;
+		snprintf(sql, sizeof(sql), "SET cursor_fetch_limit = %d", fetch_limit);
+		do_sql_command(conn, sql);
+	}
 }
 
 /*
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 3572689e33b..abfbd0e60ac 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -3865,8 +3865,11 @@ fetch_more_data(ForeignScanState *node)
 	if (fsstate->fetch_ct_2 < 2)
 		fsstate->fetch_ct_2++;
 
-	/* Must be EOF if we didn't get as many tuples as we asked for. */
-	fsstate->eof_reached = (numrows < fsstate->fetch_size);
+	/*
+	 * Must be EOF if we got 0 rows. We can get fewer rows than we asked for,
+	 * even if there's more data, when a batch of "fetch_size" rows exceeds work_mem.
+	 */
+	fsstate->eof_reached = (numrows == 0);
 
 	PQclear(res);
 
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 11bec20e82e..7308d2ff4f0 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -57,6 +57,7 @@
 
 #include "postgres.h"
 
+#include "access/detoast.h"
 #include "access/heaptoast.h"
 #include "access/sysattr.h"
 #include "access/tupdesc_details.h"
@@ -266,6 +267,59 @@ heap_compute_data_size(TupleDesc tupleDesc,
 	return data_length;
 }
 
+/*
+ * Estimate the size of a tuple as transferred on the wire. Unlike
+ * heap_compute_data_size(), it assumes that all toasted values
+ * will be detoasted (but makes no assumptions about further
+ * processing).
+ */
+Size
+estimate_tuple_size(TupleDesc tupleDesc,
+					   const Datum *values,
+					   const bool *isnull)
+{
+	Size		data_length = 0;
+	int			i;
+	int			numberOfAttributes = tupleDesc->natts;
+
+	for (i = 0; i < numberOfAttributes; i++)
+	{
+		Datum		val;
+		CompactAttribute *atti;
+		bool		is_varlena;
+		bool		is_varwidth;
+
+		if (isnull[i])
+			continue;
+
+		val = values[i];
+		atti = TupleDescCompactAttr(tupleDesc, i);
+
+		is_varlena = (!atti->attbyval && atti->attlen == -1);
+		is_varwidth = (!atti->attbyval && atti->attlen == -2);
+
+		if (is_varlena)
+		{
+			data_length = att_nominal_alignby(data_length, atti->attalignby);
+			data_length += toast_raw_datum_size(val);
+		}
+		else if (is_varwidth)
+		{
+			data_length = att_nominal_alignby(data_length, atti->attalignby);
+			data_length += strlen(DatumGetCString(val)) + 1;
+		}
+		else
+		{
+			data_length = att_datum_alignby(data_length, atti->attalignby,
+											atti->attlen, val);
+			data_length = att_addlength_datum(data_length, atti->attlen,
+											  val);
+		}
+	}
+
+	return data_length;
+}
+
 /*
  * Per-attribute helper for heap_fill_tuple and other routines building tuples.
  *
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index bfd3ebc601e..7b0fef57a66 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -64,6 +64,9 @@
 #include "utils/snapmgr.h"
 
 
+/* max batch size, in bytes, for fetching from a cursor; -1 means no limit */
+int cursor_fetch_limit = -1;
+
 /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
 ExecutorStart_hook_type ExecutorStart_hook = NULL;
 ExecutorRun_hook_type ExecutorRun_hook = NULL;
@@ -343,6 +346,7 @@ standard_ExecutorRun(QueryDesc *queryDesc,
 	 * startup tuple receiver, if we will be emitting tuples
 	 */
 	estate->es_processed = 0;
+	estate->es_eof = false;
 
 	sendTuples = (operation == CMD_SELECT ||
 				  queryDesc->plannedstmt->hasReturning);
@@ -1669,11 +1673,13 @@ ExecutePlan(QueryDesc *queryDesc,
 	bool		use_parallel_mode;
 	TupleTableSlot *slot;
 	uint64		current_tuple_count;
+	Size		current_batch_size;
 
 	/*
 	 * initialize local variables
 	 */
 	current_tuple_count = 0;
+	current_batch_size = 0;
 
 	/*
 	 * Set the direction.
@@ -1715,7 +1721,10 @@ ExecutePlan(QueryDesc *queryDesc,
 		 * process so we just end the loop...
 		 */
 		if (TupIsNull(slot))
+		{
+			estate->es_eof = true;
 			break;
+		}
 
 		/*
 		 * If we have a junk filter, then project a new tuple with the junk
@@ -1740,7 +1749,10 @@ ExecutePlan(QueryDesc *queryDesc,
 			 * end the loop.
 			 */
 			if (!dest->receiveSlot(slot, dest))
+			{
+				estate->es_eof = true;
 				break;
+			}
 		}
 
 		/*
@@ -1757,8 +1769,29 @@ ExecutePlan(QueryDesc *queryDesc,
 		 * means no limit.
 		 */
 		current_tuple_count++;
-		if (numberTuples && numberTuples == current_tuple_count)
-			break;
+		if (numberTuples)
+		{
+			if (numberTuples == current_tuple_count)
+				break;
+
+			/*
+			 * If a batch limit in bytes is specified, track the current batch
+			 * size and exit when the limit is reached.
+			 */
+			if (cursor_fetch_limit >= 0)
+			{
+				Size tuple_len;
+
+				/* Compute length of the current tuple */
+				slot_getallattrs(slot);
+				tuple_len = estimate_tuple_size(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);
+				current_batch_size += tuple_len;
+
+				if (current_batch_size > cursor_fetch_limit)
+					break;
+			}
+		}
+
 	}
 
 	/*
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index a7955e476f9..7f820388878 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -143,6 +143,7 @@ CreateExecutorState(void)
 
 	estate->es_processed = 0;
 	estate->es_total_processed = 0;
+	estate->es_eof = false;
 
 	estate->es_top_eflags = 0;
 	estate->es_instrument = 0;
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index d8fc75d0bb9..78908ae2a8b 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -17,6 +17,7 @@
 
 #include <limits.h>
 
+#include "access/htup_details.h"
 #include "access/xact.h"
 #include "commands/prepare.h"
 #include "executor/executor.h"
@@ -44,7 +45,7 @@ static void ProcessQuery(PlannedStmt *plan,
 						 QueryCompletion *qc);
 static void FillPortalStore(Portal portal, bool isTopLevel);
 static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
-						   DestReceiver *dest);
+						   DestReceiver *dest, bool *eof);
 static uint64 PortalRunSelect(Portal portal, bool forward, long count,
 							  DestReceiver *dest);
 static void PortalRunUtility(Portal portal, PlannedStmt *pstmt,
@@ -864,6 +865,7 @@ PortalRunSelect(Portal portal,
 	QueryDesc  *queryDesc;
 	ScanDirection direction;
 	uint64		nprocessed;
+	bool		eof;
 
 	/*
 	 * NB: queryDesc will be NULL if we are fetching from a held cursor or a
@@ -909,20 +911,23 @@ PortalRunSelect(Portal portal,
 			count = 0;
 
 		if (portal->holdStore)
-			nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
+		{
+			nprocessed = RunFromStore(portal, direction, (uint64) count, dest, &eof);
+		}
 		else
 		{
 			PushActiveSnapshot(queryDesc->snapshot);
 			ExecutorRun(queryDesc, direction, (uint64) count);
 			nprocessed = queryDesc->estate->es_processed;
 			PopActiveSnapshot();
+			eof = queryDesc->estate->es_eof;
 		}
 
 		if (!ScanDirectionIsNoMovement(direction))
 		{
 			if (nprocessed > 0)
 				portal->atStart = false;	/* OK to go backward now */
-			if (count == 0 || nprocessed < (uint64) count)
+			if (count == 0 || eof)
 				portal->atEnd = true;	/* we retrieved 'em all */
 			portal->portalPos += nprocessed;
 		}
@@ -948,13 +953,16 @@ PortalRunSelect(Portal portal,
 			count = 0;
 
 		if (portal->holdStore)
-			nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
+		{
+			nprocessed = RunFromStore(portal, direction, (uint64) count, dest, &eof);
+		}
 		else
 		{
 			PushActiveSnapshot(queryDesc->snapshot);
 			ExecutorRun(queryDesc, direction, (uint64) count);
 			nprocessed = queryDesc->estate->es_processed;
 			PopActiveSnapshot();
+			eof = queryDesc->estate->es_eof;
 		}
 
 		if (!ScanDirectionIsNoMovement(direction))
@@ -964,7 +972,7 @@ PortalRunSelect(Portal portal,
 				portal->atEnd = false;	/* OK to go forward now */
 				portal->portalPos++;	/* adjust for endpoint case */
 			}
-			if (count == 0 || nprocessed < (uint64) count)
+			if (count == 0 || eof)
 			{
 				portal->atStart = true; /* we retrieved 'em all */
 				portal->portalPos = 0;
@@ -1049,15 +1057,18 @@ FillPortalStore(Portal portal, bool isTopLevel)
  */
 static uint64
 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
-			 DestReceiver *dest)
+			 DestReceiver *dest, bool *eof)
 {
 	uint64		current_tuple_count = 0;
+	Size		current_batch_size = 0;
 	TupleTableSlot *slot;
 
 	slot = MakeSingleTupleTableSlot(portal->tupDesc, &TTSOpsMinimalTuple);
 
 	dest->rStartup(dest, CMD_SELECT, portal->tupDesc);
 
+	*eof = false;
+
 	if (ScanDirectionIsNoMovement(direction))
 	{
 		/* do nothing except start/stop the destination */
@@ -1079,7 +1090,10 @@ RunFromStore(Portal portal, ScanDirection direction, uint64 count,
 			MemoryContextSwitchTo(oldcontext);
 
 			if (!ok)
+			{
+				*eof = true;
 				break;
+			}
 
 			/*
 			 * If we are not able to send the tuple, we assume the destination
@@ -1087,7 +1101,24 @@ RunFromStore(Portal portal, ScanDirection direction, uint64 count,
 			 * end the loop.
 			 */
 			if (!dest->receiveSlot(slot, dest))
+			{
+				*eof = true;
 				break;
+			}
+
+			/*
+			 * If a batch limit in bytes is specified, track the current batch
+			 * size.
+			 */
+			if (cursor_fetch_limit >= 0 && count)
+			{
+				Size		tuple_len;
+
+				/* Compute length of the current tuple. */
+				slot_getallattrs(slot);
+				tuple_len = estimate_tuple_size(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);
+				current_batch_size += tuple_len;
+			}
 
 			ExecClearTuple(slot);
 
@@ -1097,8 +1128,15 @@ RunFromStore(Portal portal, ScanDirection direction, uint64 count,
 			 * means no limit.
 			 */
 			current_tuple_count++;
-			if (count && count == current_tuple_count)
-				break;
+			if (count)
+			{
+				if (count == current_tuple_count)
+					break;
+
+				/* Exit if batch limit is reached */
+				if (current_batch_size > cursor_fetch_limit)
+					break;
+			}
 		}
 	}
 
diff --git a/src/backend/utils/misc/guc_parameters.dat b/src/backend/utils/misc/guc_parameters.dat
index 7c60b125564..e7bc17a8f07 100644
--- a/src/backend/utils/misc/guc_parameters.dat
+++ b/src/backend/utils/misc/guc_parameters.dat
@@ -522,6 +522,15 @@
   assign_hook => 'assign_createrole_self_grant',
 },
 
+{ name => 'cursor_fetch_limit', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
  short_desc => 'Sets the maximum size in bytes retrieved by a single fetch from a cursor.',
+  flags => 'GUC_UNIT_BYTE',
+  variable => 'cursor_fetch_limit',
+  boot_val => '-1',
+  min => '-1',
+  max => 'MaxAllocSize',
+},
+
 { name => 'cursor_tuple_fraction', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER',
   short_desc => 'Sets the planner\'s estimate of the fraction of a cursor\'s rows that will be retrieved.',
   flags => 'GUC_EXPLAIN',
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index 73ff6ad0a32..bf40d407494 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -50,6 +50,7 @@
 #include "commands/vacuum.h"
 #include "common/file_utils.h"
 #include "common/scram-common.h"
+#include "executor/executor.h"
 #include "jit/jit.h"
 #include "libpq/auth.h"
 #include "libpq/libpq.h"
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index dc9e2255f8a..169e96e20fe 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -468,6 +468,7 @@
 #default_statistics_target = 100        # range 1-10000
 #constraint_exclusion = partition       # on, off, or partition
 #cursor_tuple_fraction = 0.1            # range 0.0-1.0
+#cursor_fetch_limit = -1                # maximum size in bytes for fetching from cursor
 #from_collapse_limit = 8
 #jit = on                               # allow JIT compilation
 #join_collapse_limit = 8                # 1 disables collapsing of explicit
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index d406825ff22..cb24e159509 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -808,6 +808,9 @@ HeapTupleClearHeapOnly(const HeapTupleData *tuple)
 /* prototypes for functions in common/heaptuple.c */
 extern Size heap_compute_data_size(TupleDesc tupleDesc,
 								   const Datum *values, const bool *isnull);
+extern Size estimate_tuple_size(TupleDesc tupleDesc,
+					   const Datum *values, const bool *isnull);
+
 extern void heap_fill_tuple(TupleDesc tupleDesc,
 							const Datum *values, const bool *isnull,
 							char *data, Size data_size,
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 5929aabc353..80fcb8f6f10 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -96,6 +96,8 @@ typedef bool (*ExecutorCheckPerms_hook_type) (List *rangeTable,
 											  bool ereport_on_violation);
 extern PGDLLIMPORT ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook;
 
+/* GUC variables */
+extern PGDLLIMPORT int cursor_fetch_limit;
 
 /*
  * prototypes from functions in execAmi.c
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 3220f9b2574..ecd0edd390c 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -719,6 +719,7 @@ typedef struct EState
 								 * ExecutorRun() call. */
 	uint64		es_total_processed; /* total # of tuples aggregated across all
 									 * ExecutorRun() calls. */
+	bool		es_eof;	/* true if no more data to process */
 
 	int			es_top_eflags;	/* eflags passed to ExecutorStart */
 	int			es_instrument;	/* OR of InstrumentOption flags */
diff --git a/src/test/regress/expected/portals.out b/src/test/regress/expected/portals.out
index 31f77abc446..2a1d773b374 100644
--- a/src/test/regress/expected/portals.out
+++ b/src/test/regress/expected/portals.out
@@ -1561,3 +1561,108 @@ fetch all in held_portal;
 (1 row)
 
 reset default_toast_compression;
+-- Check memory limit for fetching from cursor
+set cursor_fetch_limit = '512B';
+BEGIN;
+DECLARE foo1 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo2 SCROLL CURSOR FOR SELECT * FROM tenk2;
+DECLARE foo3 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo4 SCROLL CURSOR FOR SELECT * FROM tenk2;
+FETCH 1 in foo1;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    8800 |       0 |   0 |    0 |   0 |      0 |       0 |      800 |         800 |      3800 |     8800 |   0 |    1 | MAAAAA   | AAAAAA   | AAAAxx
+(1 row)
+
+FETCH 2 in foo2;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    8800 |       0 |   0 |    0 |   0 |      0 |       0 |      800 |         800 |      3800 |     8800 |   0 |    1 | MAAAAA   | AAAAAA   | AAAAxx
+    1891 |       1 |   1 |    3 |   1 |     11 |      91 |      891 |        1891 |      1891 |     1891 | 182 |  183 | TUAAAA   | BAAAAA   | HHHHxx
+(2 rows)
+
+FETCH 3 in foo3;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    8800 |       0 |   0 |    0 |   0 |      0 |       0 |      800 |         800 |      3800 |     8800 |   0 |    1 | MAAAAA   | AAAAAA   | AAAAxx
+    1891 |       1 |   1 |    3 |   1 |     11 |      91 |      891 |        1891 |      1891 |     1891 | 182 |  183 | TUAAAA   | BAAAAA   | HHHHxx
+    3420 |       2 |   0 |    0 |   0 |      0 |      20 |      420 |        1420 |      3420 |     3420 |  40 |   41 | OBAAAA   | CAAAAA   | OOOOxx
+(3 rows)
+
+-- Here the limit kicks in: only 3 of the 4 requested rows are returned
+FETCH 4 in foo4;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    8800 |       0 |   0 |    0 |   0 |      0 |       0 |      800 |         800 |      3800 |     8800 |   0 |    1 | MAAAAA   | AAAAAA   | AAAAxx
+    1891 |       1 |   1 |    3 |   1 |     11 |      91 |      891 |        1891 |      1891 |     1891 | 182 |  183 | TUAAAA   | BAAAAA   | HHHHxx
+    3420 |       2 |   0 |    0 |   0 |      0 |      20 |      420 |        1420 |      3420 |     3420 |  40 |   41 | OBAAAA   | CAAAAA   | OOOOxx
+(3 rows)
+
+-- Fetch the next batch
+FETCH 4 in foo4;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    9850 |       3 |   0 |    2 |   0 |     10 |      50 |      850 |        1850 |      4850 |     9850 | 100 |  101 | WOAAAA   | DAAAAA   | VVVVxx
+    7164 |       4 |   0 |    0 |   4 |      4 |      64 |      164 |        1164 |      2164 |     7164 | 128 |  129 | OPAAAA   | EAAAAA   | AAAAxx
+    8009 |       5 |   1 |    1 |   9 |      9 |       9 |        9 |           9 |      3009 |     8009 |  18 |   19 | BWAAAA   | FAAAAA   | HHHHxx
+(3 rows)
+
+FETCH backward 4 in foo4;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    7164 |       4 |   0 |    0 |   4 |      4 |      64 |      164 |        1164 |      2164 |     7164 | 128 |  129 | OPAAAA   | EAAAAA   | AAAAxx
+    9850 |       3 |   0 |    2 |   0 |     10 |      50 |      850 |        1850 |      4850 |     9850 | 100 |  101 | WOAAAA   | DAAAAA   | VVVVxx
+    3420 |       2 |   0 |    0 |   0 |      0 |      20 |      420 |        1420 |      3420 |     3420 |  40 |   41 | OBAAAA   | CAAAAA   | OOOOxx
+(3 rows)
+
+-- Check cursor with hold
+DECLARE foo5 CURSOR WITH HOLD FOR SELECT * FROM tenk2 LIMIT 30;
+commit;
+FETCH 5 FROM foo5;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    8800 |       0 |   0 |    0 |   0 |      0 |       0 |      800 |         800 |      3800 |     8800 |   0 |    1 | MAAAAA   | AAAAAA   | AAAAxx
+    1891 |       1 |   1 |    3 |   1 |     11 |      91 |      891 |        1891 |      1891 |     1891 | 182 |  183 | TUAAAA   | BAAAAA   | HHHHxx
+    3420 |       2 |   0 |    0 |   0 |      0 |      20 |      420 |        1420 |      3420 |     3420 |  40 |   41 | OBAAAA   | CAAAAA   | OOOOxx
+(3 rows)
+
+FETCH 5 FROM foo5;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    9850 |       3 |   0 |    2 |   0 |     10 |      50 |      850 |        1850 |      4850 |     9850 | 100 |  101 | WOAAAA   | DAAAAA   | VVVVxx
+    7164 |       4 |   0 |    0 |   4 |      4 |      64 |      164 |        1164 |      2164 |     7164 | 128 |  129 | OPAAAA   | EAAAAA   | AAAAxx
+    8009 |       5 |   1 |    1 |   9 |      9 |       9 |        9 |           9 |      3009 |     8009 |  18 |   19 | BWAAAA   | FAAAAA   | HHHHxx
+(3 rows)
+
+-- FETCH ALL is not limited by cursor_fetch_limit
+FETCH ALL FROM foo5;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+    5057 |       6 |   1 |    1 |   7 |     17 |      57 |       57 |        1057 |        57 |     5057 | 114 |  115 | NMAAAA   | GAAAAA   | OOOOxx
+    6701 |       7 |   1 |    1 |   1 |      1 |       1 |      701 |         701 |      1701 |     6701 |   2 |    3 | TXAAAA   | HAAAAA   | VVVVxx
+    4321 |       8 |   1 |    1 |   1 |      1 |      21 |      321 |         321 |      4321 |     4321 |  42 |   43 | FKAAAA   | IAAAAA   | AAAAxx
+    3043 |       9 |   1 |    3 |   3 |      3 |      43 |       43 |        1043 |      3043 |     3043 |  86 |   87 | BNAAAA   | JAAAAA   | HHHHxx
+    1314 |      10 |   0 |    2 |   4 |     14 |      14 |      314 |        1314 |      1314 |     1314 |  28 |   29 | OYAAAA   | KAAAAA   | OOOOxx
+    1504 |      11 |   0 |    0 |   4 |      4 |       4 |      504 |        1504 |      1504 |     1504 |   8 |    9 | WFAAAA   | LAAAAA   | VVVVxx
+    5222 |      12 |   0 |    2 |   2 |      2 |      22 |      222 |        1222 |       222 |     5222 |  44 |   45 | WSAAAA   | MAAAAA   | AAAAxx
+    6243 |      13 |   1 |    3 |   3 |      3 |      43 |      243 |         243 |      1243 |     6243 |  86 |   87 | DGAAAA   | NAAAAA   | HHHHxx
+    5471 |      14 |   1 |    3 |   1 |     11 |      71 |      471 |        1471 |       471 |     5471 | 142 |  143 | LCAAAA   | OAAAAA   | OOOOxx
+    5006 |      15 |   0 |    2 |   6 |      6 |       6 |        6 |        1006 |         6 |     5006 |  12 |   13 | OKAAAA   | PAAAAA   | VVVVxx
+    5387 |      16 |   1 |    3 |   7 |      7 |      87 |      387 |        1387 |       387 |     5387 | 174 |  175 | FZAAAA   | QAAAAA   | AAAAxx
+    5785 |      17 |   1 |    1 |   5 |      5 |      85 |      785 |        1785 |       785 |     5785 | 170 |  171 | NOAAAA   | RAAAAA   | HHHHxx
+    6621 |      18 |   1 |    1 |   1 |      1 |      21 |      621 |         621 |      1621 |     6621 |  42 |   43 | RUAAAA   | SAAAAA   | OOOOxx
+    6969 |      19 |   1 |    1 |   9 |      9 |      69 |      969 |         969 |      1969 |     6969 | 138 |  139 | BIAAAA   | TAAAAA   | VVVVxx
+    9460 |      20 |   0 |    0 |   0 |      0 |      60 |      460 |        1460 |      4460 |     9460 | 120 |  121 | WZAAAA   | UAAAAA   | AAAAxx
+      59 |      21 |   1 |    3 |   9 |     19 |      59 |       59 |          59 |        59 |       59 | 118 |  119 | HCAAAA   | VAAAAA   | HHHHxx
+    8020 |      22 |   0 |    0 |   0 |      0 |      20 |       20 |          20 |      3020 |     8020 |  40 |   41 | MWAAAA   | WAAAAA   | OOOOxx
+    7695 |      23 |   1 |    3 |   5 |     15 |      95 |      695 |        1695 |      2695 |     7695 | 190 |  191 | ZJAAAA   | XAAAAA   | VVVVxx
+    3442 |      24 |   0 |    2 |   2 |      2 |      42 |      442 |        1442 |      3442 |     3442 |  84 |   85 | KCAAAA   | YAAAAA   | AAAAxx
+    5119 |      25 |   1 |    3 |   9 |     19 |      19 |      119 |        1119 |       119 |     5119 |  38 |   39 | XOAAAA   | ZAAAAA   | HHHHxx
+     646 |      26 |   0 |    2 |   6 |      6 |      46 |      646 |         646 |       646 |      646 |  92 |   93 | WYAAAA   | ABAAAA   | OOOOxx
+    9605 |      27 |   1 |    1 |   5 |      5 |       5 |      605 |        1605 |      4605 |     9605 |  10 |   11 | LFAAAA   | BBAAAA   | VVVVxx
+     263 |      28 |   1 |    3 |   3 |      3 |      63 |      263 |         263 |       263 |      263 | 126 |  127 | DKAAAA   | CBAAAA   | AAAAxx
+    3269 |      29 |   1 |    1 |   9 |      9 |      69 |      269 |        1269 |      3269 |     3269 | 138 |  139 | TVAAAA   | DBAAAA   | HHHHxx
+(24 rows)
+
+CLOSE foo5;
+reset cursor_fetch_limit;
diff --git a/src/test/regress/sql/portals.sql b/src/test/regress/sql/portals.sql
index fc4cccb96c0..17c389b7070 100644
--- a/src/test/regress/sql/portals.sql
+++ b/src/test/regress/sql/portals.sql
@@ -605,3 +605,31 @@ drop table toasted_data;
 fetch all in held_portal;
 
 reset default_toast_compression;
+
+-- Check memory limit for fetching from cursor
+set cursor_fetch_limit = '512B';
+
+BEGIN;
+DECLARE foo1 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo2 SCROLL CURSOR FOR SELECT * FROM tenk2;
+DECLARE foo3 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo4 SCROLL CURSOR FOR SELECT * FROM tenk2;
+
+FETCH 1 in foo1;
+FETCH 2 in foo2;
+FETCH 3 in foo3;
+-- Here the limit kicks in: only 3 of the 4 requested rows are returned
+FETCH 4 in foo4;
+-- Fetch the next batch
+FETCH 4 in foo4;
+FETCH backward 4 in foo4;
+
+-- Check cursor with hold
+DECLARE foo5 CURSOR WITH HOLD FOR SELECT * FROM tenk2 LIMIT 30;
+commit;
+FETCH 5 FROM foo5;
+FETCH 5 FROM foo5;
+-- FETCH ALL is not limited by cursor_fetch_limit
+FETCH ALL FROM foo5;
+CLOSE foo5;
+reset cursor_fetch_limit;
-- 
2.43.0
