Alexander Pyhalov писал(а) 2026-01-23 09:18:
Alexander Pyhalov писал(а) 2026-01-13 13:44:
For now I start thinking we need some form of FETCH, which stops
fetching data based on batch size...
Hi.
To limit memory consumption, we actually have to retrieve less data.
And we can do it only on the side of the foreign server. I've rewritten
the third patch. We introduce a new parameter - cursor_fetch_limit,
which is set by postgres_fdw. When it is set, fetching a limited count of
records from the cursor is also limited by the memory consumed by the
records. Of course, the record size is only an estimate (for example, we
don't know what the output function will do).
This works as expected - in my tests with tables of large records,
backends, executing selects, were always restricted by about 2 GB of
RAM overall (without patch memory consumption easily grows up to 8 GB).
However, now when we get fewer tuples from the executor than expected, we
should recheck whether these are all the tuples we can get. I've
introduced an es_eof EState field to signal that there are no more
tuples. I don't know if it's the best way.
The issue which worried me is that we could change behavior for cursors,
which don't expect this (for example, if these cursors are used in
functions, which are executed via FDW). So I added new cursor option and
used it in postgres_fdw.
Also rearranged patches. While working with test cases I found that we
should avoid batch insert for big tuples, as it has significant
overhead. Switching from (limited) batch insert to plain foreign insert
decreased memory consumption by a factor of 3 in some tests.
--
Best regards,
Alexander Pyhalov,
Postgres Professional
From 1b8fe1c7438966b639630eeabe08f1e48d711cd6 Mon Sep 17 00:00:00 2001
From: Alexander Pyhalov <[email protected]>
Date: Thu, 29 Jan 2026 16:18:40 +0300
Subject: [PATCH 1/3] Introduce function to estimate tuple size
Unlike heap_compute_data_size() it assumes that all toasts will be
detoasted.
---
src/backend/access/common/heaptuple.c | 54 +++++++++++++++++++++++++++
src/include/access/htup_details.h | 3 ++
2 files changed, 57 insertions(+)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 11bec20e82e..7308d2ff4f0 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -57,6 +57,7 @@
#include "postgres.h"
+#include "access/detoast.h"
#include "access/heaptoast.h"
#include "access/sysattr.h"
#include "access/tupdesc_details.h"
@@ -266,6 +267,59 @@ heap_compute_data_size(TupleDesc tupleDesc,
return data_length;
}
+/*
+ * Estimate tuple size when it's transferred to the wire. Unlike
+ * heap_compute_data_size() it assumes that all toasts will be
+ * detoasted (but doesn't make assumptions about further
+ * processing).
+ */
+Size
+estimate_tuple_size(TupleDesc tupleDesc,
+ const Datum *values,
+ const bool *isnull)
+{
+ Size data_length = 0;
+ int i;
+ int numberOfAttributes = tupleDesc->natts;
+
+ for (i = 0; i < numberOfAttributes; i++)
+ {
+ Datum val;
+ CompactAttribute *atti;
+ bool is_varlena;
+ bool is_varwidth;
+
+ if (isnull[i])
+ continue;
+
+ val = values[i];
+ atti = TupleDescCompactAttr(tupleDesc, i);
+
+ is_varlena = (!atti->attbyval && atti->attlen == -1);
+ is_varwidth = (!atti->attbyval && atti->attlen == -2);
+
+ if (is_varlena)
+ {
+ data_length = att_nominal_alignby(data_length, atti->attalignby);
+ data_length += toast_raw_datum_size(val);
+ }
+ else if (is_varwidth)
+ {
+ data_length = att_nominal_alignby(data_length, atti->attalignby);
+ data_length += strlen(DatumGetCString(val)) + 1;
+ }
+ else
+ {
+ data_length = att_datum_alignby(data_length, atti->attalignby,
+ atti->attlen, val);
+ data_length = att_addlength_datum(data_length, atti->attlen,
+ val);
+ }
+ }
+
+ return data_length;
+}
+
/*
* Per-attribute helper for heap_fill_tuple and other routines building tuples.
*
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index d406825ff22..cb24e159509 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -808,6 +808,9 @@ HeapTupleClearHeapOnly(const HeapTupleData *tuple)
/* prototypes for functions in common/heaptuple.c */
extern Size heap_compute_data_size(TupleDesc tupleDesc,
const Datum *values, const bool *isnull);
+extern Size estimate_tuple_size(TupleDesc tupleDesc,
+ const Datum *values, const bool *isnull);
+
extern void heap_fill_tuple(TupleDesc tupleDesc,
const Datum *values, const bool *isnull,
char *data, Size data_size,
--
2.43.0
From fd324f39df941e2a692a9c950c8182ed07502140 Mon Sep 17 00:00:00 2001
From: Alexander Pyhalov <[email protected]>
Date: Tue, 16 Dec 2025 18:46:27 +0300
Subject: [PATCH 2/3] Limit batch_size for foreign insert with work_mem
Option "batch_size" can be set on foreign server level.
It can be very optimistic even for one table with
different tuple lengths. To prevent large memory usage
limit effective batch size with work_mem.
When tuple is more than work_mem, we don't form batch,
but fall through to usual foreign insert. This means
that executor can form batch, insert it and finish with
a usual insert, so there will be no pending batch inserts
to process. To avoid manipulating possibly long lists,
we don't clear es_insert_pending_result_relations and
es_insert_pending_modifytables lists, but check in
ExecPendingInserts() if there are any pending tuples.
---
.../postgres_fdw/expected/postgres_fdw.out | 11 ++
contrib/postgres_fdw/sql/postgres_fdw.sql | 9 ++
src/backend/executor/nodeModifyTable.c | 149 ++++++++++--------
src/include/nodes/execnodes.h | 2 +
4 files changed, 107 insertions(+), 64 deletions(-)
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 6066510c7c0..cb3044c769a 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -7640,6 +7640,17 @@ select count(*) from tab_batch_sharded;
45
(1 row)
+delete from tab_batch_sharded;
+-- test batch insert with large tuples and switching from batch to usual insert
+set work_mem to 64;
+insert into tab_batch_sharded select 3, case when i%4 = 0 then lpad('a',65*1024,'a') else 'test'|| i end from generate_series(1, 100) i;
+select count(*) from tab_batch_sharded;
+ count
+-------
+ 100
+(1 row)
+
+reset work_mem;
drop table tab_batch_local;
drop table tab_batch_sharded;
drop table tab_batch_sharded_p1_remote;
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index 4f7ab2ed0ac..a084afc8de7 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -1944,10 +1944,19 @@ create foreign table tab_batch_sharded_p1 partition of tab_batch_sharded
server loopback options (table_name 'tab_batch_sharded_p1_remote');
insert into tab_batch_sharded select * from tab_batch_local;
select count(*) from tab_batch_sharded;
+delete from tab_batch_sharded;
+-- test batch insert with large tuples and switching from batch to usual insert
+set work_mem to 64;
+insert into tab_batch_sharded select 3, case when i%4 = 0 then lpad('a',65*1024,'a') else 'test'|| i end from generate_series(1, 100) i;
+select count(*) from tab_batch_sharded;
+reset work_mem;
+
drop table tab_batch_local;
drop table tab_batch_sharded;
drop table tab_batch_sharded_p1_remote;
+
+
alter server loopback options (drop batch_size);
-- ===================================================================
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 7d7411a7056..fcbffdd2dec 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -942,87 +942,100 @@ ExecInsert(ModifyTableContext *context,
*/
if (resultRelInfo->ri_BatchSize > 1)
{
- bool flushed = false;
+ Size tuple_len;
+
+ /* Compute length of the current tuple */
+ slot_getallattrs(slot);
+ tuple_len = estimate_tuple_size(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);
/*
- * When we've reached the desired batch size, perform the
- * insertion.
+ * When we've reached the desired batch size or exceeded work_mem,
+ * perform the insertion.
*/
- if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
+ if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize ||
+ ((tuple_len + resultRelInfo->ri_BatchMemoryUsed > work_mem * 1024) && (resultRelInfo->ri_NumSlots > 0)))
{
ExecBatchInsert(mtstate, resultRelInfo,
resultRelInfo->ri_Slots,
resultRelInfo->ri_PlanSlots,
resultRelInfo->ri_NumSlots,
estate, canSetTag);
- flushed = true;
}
- oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
-
- if (resultRelInfo->ri_Slots == NULL)
+ if (tuple_len < work_mem * 1024)
{
- resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
- resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
- }
+ oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
- /*
- * Initialize the batch slots. We don't know how many slots will
- * be needed, so we initialize them as the batch grows, and we
- * keep them across batches. To mitigate an inefficiency in how
- * resource owner handles objects with many references (as with
- * many slots all referencing the same tuple descriptor) we copy
- * the appropriate tuple descriptor for each slot.
- */
- if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
- {
- TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
- TupleDesc plan_tdesc =
- CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+ if (resultRelInfo->ri_Slots == NULL)
+ {
+ resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
+ resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
+ }
- resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
- MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
+ /*
+ * Initialize the batch slots. We don't know how many slots
+ * will be needed, so we initialize them as the batch grows,
+ * and we keep them across batches. To mitigate an
+ * inefficiency in how resource owner handles objects with
+ * many references (as with many slots all referencing the
+ * same tuple descriptor) we copy the appropriate tuple
+ * descriptor for each slot.
+ */
+ if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
+ {
+ TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
+ TupleDesc plan_tdesc =
+ CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
- resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
- MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
+ resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
+ MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
- /* remember how many batch slots we initialized */
- resultRelInfo->ri_NumSlotsInitialized++;
- }
+ resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
+ MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
- ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
- slot);
+ /* remember how many batch slots we initialized */
+ resultRelInfo->ri_NumSlotsInitialized++;
+ }
- ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
- planSlot);
+ ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
+ slot);
- /*
- * If these are the first tuples stored in the buffers, add the
- * target rel and the mtstate to the
- * es_insert_pending_result_relations and
- * es_insert_pending_modifytables lists respectively, except in
- * the case where flushing was done above, in which case they
- * would already have been added to the lists, so no need to do
- * this.
- */
- if (resultRelInfo->ri_NumSlots == 0 && !flushed)
- {
- Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
- resultRelInfo));
- estate->es_insert_pending_result_relations =
- lappend(estate->es_insert_pending_result_relations,
- resultRelInfo);
- estate->es_insert_pending_modifytables =
- lappend(estate->es_insert_pending_modifytables, mtstate);
- }
- Assert(list_member_ptr(estate->es_insert_pending_result_relations,
- resultRelInfo));
+ ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
+ planSlot);
+
+ /*
+ * If these are the first tuples stored in the buffers, add
+ * the target rel and the mtstate to the
+ * es_insert_pending_result_relations and
+ * es_insert_pending_modifytables lists respectively, except
+ * in the case where flushing was done above, in which case
+ * they would already have been added to the lists, so no need
+ * to do this.
+ */
+ if (resultRelInfo->ri_NumSlots == 0 && !resultRelInfo->ri_ExecutorPendingStateModified)
+ {
+ Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
+ resultRelInfo));
+ estate->es_insert_pending_result_relations =
+ lappend(estate->es_insert_pending_result_relations,
+ resultRelInfo);
+ estate->es_insert_pending_modifytables =
+ lappend(estate->es_insert_pending_modifytables, mtstate);
+ resultRelInfo->ri_ExecutorPendingStateModified = true;
+ }
+ Assert(list_member_ptr(estate->es_insert_pending_result_relations,
+ resultRelInfo));
- resultRelInfo->ri_NumSlots++;
+ resultRelInfo->ri_NumSlots++;
+ resultRelInfo->ri_BatchMemoryUsed += tuple_len;
+ /* We've flushed batch if it is too big. */
+ Assert(resultRelInfo->ri_BatchMemoryUsed < work_mem * 1024);
- MemoryContextSwitchTo(oldContext);
+ MemoryContextSwitchTo(oldContext);
- return NULL;
+ return NULL;
+ }
+ /* else do usual foreign insert */
}
/*
@@ -1418,6 +1431,7 @@ ExecBatchInsert(ModifyTableState *mtstate,
ExecClearTuple(planSlots[i]);
}
resultRelInfo->ri_NumSlots = 0;
+ resultRelInfo->ri_BatchMemoryUsed = 0;
}
/*
@@ -1436,11 +1450,18 @@ ExecPendingInserts(EState *estate)
ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
Assert(mtstate);
- ExecBatchInsert(mtstate, resultRelInfo,
- resultRelInfo->ri_Slots,
- resultRelInfo->ri_PlanSlots,
- resultRelInfo->ri_NumSlots,
- estate, mtstate->canSetTag);
+
+ /*
+ * Batch insert could switch to non-batched insert, in this case we
+ * could have no filled slots.
+ */
+ if (resultRelInfo->ri_NumSlots > 0)
+ ExecBatchInsert(mtstate, resultRelInfo,
+ resultRelInfo->ri_Slots,
+ resultRelInfo->ri_PlanSlots,
+ resultRelInfo->ri_NumSlots,
+ estate, mtstate->canSetTag);
+ resultRelInfo->ri_ExecutorPendingStateModified = false;
}
list_free(estate->es_insert_pending_result_relations);
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index f8053d9e572..3220f9b2574 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -544,6 +544,8 @@ typedef struct ResultRelInfo
int ri_NumSlots; /* number of slots in the array */
int ri_NumSlotsInitialized; /* number of initialized slots */
int ri_BatchSize; /* max slots inserted in a single batch */
+ int ri_BatchMemoryUsed; /* memory used by batch */
+ bool ri_ExecutorPendingStateModified; /* true if member of estate->es_insert_pending_result_relations */
TupleTableSlot **ri_Slots; /* input tuples for batch insert */
TupleTableSlot **ri_PlanSlots;
--
2.43.0
From 2e2ea737629334cfee24e47f44dd5614ff9802d9 Mon Sep 17 00:00:00 2001
From: Alexander Pyhalov <[email protected]>
Date: Thu, 15 Jan 2026 09:20:51 +0300
Subject: [PATCH 3/3] Introduce memory limited cursors and use them in
postgres_fdw
Parameter "cursor_fetch_limit" limits the amount of data
retrieved by a FETCH statement from memory limited cursors,
based on the estimated batch size in bytes. FETCH ALL
is not limited.
When cursor_fetch_limit >= 0 and a record count is passed to
ExecutorRun(), we can no longer rely on the fact that, when the executor
returned fewer records than expected, there are no more records to fetch,
so we introduce an additional EState field which, after the ExecutorRun()
call, indicates whether there are more records.
As we explicitly mark which cursors can be subject to
memory limits, we avoid affecting some uncontrolled code.
---
contrib/postgres_fdw/connection.c | 13 +++
contrib/postgres_fdw/postgres_fdw.c | 25 ++++-
src/backend/executor/execMain.c | 37 +++++-
src/backend/executor/execUtils.c | 1 +
src/backend/parser/gram.y | 9 +-
src/backend/tcop/pquery.c | 56 ++++++++--
src/backend/utils/misc/guc_parameters.dat | 9 ++
src/backend/utils/misc/guc_tables.c | 1 +
src/backend/utils/misc/postgresql.conf.sample | 1 +
src/include/executor/executor.h | 4 +-
src/include/nodes/execnodes.h | 1 +
src/include/nodes/parsenodes.h | 1 +
src/include/parser/kwlist.h | 2 +
src/test/regress/expected/portals.out | 105 ++++++++++++++++++
src/test/regress/sql/portals.sql | 28 +++++
15 files changed, 275 insertions(+), 18 deletions(-)
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index 487a1a23170..d6c6c26b393 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -802,6 +802,19 @@ configure_remote_session(PGconn *conn)
do_sql_command(conn, "SET extra_float_digits = 3");
else
do_sql_command(conn, "SET extra_float_digits = 2");
+ /*
+ * Limit fetch size with work_mem to avoid uncontrolled memory
+ * consumption.
+ */
+ if (remoteversion >= 190000)
+ {
+ char sql[64];
+ int fetch_limit;
+
+ fetch_limit = (work_mem * 1024 > MaxAllocSize) ? MaxAllocSize : work_mem * 1024;
+ snprintf(sql, sizeof(sql), "SET cursor_fetch_limit = %d", fetch_limit);
+ do_sql_command(conn, sql);
+ }
}
/*
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 3572689e33b..25d708fe4f5 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -3731,6 +3731,8 @@ create_cursor(ForeignScanState *node)
PGconn *conn = fsstate->conn;
StringInfoData buf;
PGresult *res;
+ const char *memory_limited = "";
+ int remoteversion;
/* First, process a pending asynchronous request, if any. */
if (fsstate->conn_state->pendingAreq)
@@ -3757,8 +3759,13 @@ create_cursor(ForeignScanState *node)
/* Construct the DECLARE CURSOR command */
initStringInfo(&buf);
- appendStringInfo(&buf, "DECLARE c%u CURSOR FOR\n%s",
- fsstate->cursor_number, fsstate->query);
+
+ remoteversion = PQserverVersion(conn);
+ if (remoteversion >= 190000)
+ memory_limited = "MEMORY LIMITED ";
+
+ appendStringInfo(&buf, "DECLARE c%u %sCURSOR FOR\n%s",
+ fsstate->cursor_number, memory_limited, fsstate->query);
/*
* Notice that we pass NULL for paramTypes, thus forcing the remote server
@@ -3865,8 +3872,11 @@ fetch_more_data(ForeignScanState *node)
if (fsstate->fetch_ct_2 < 2)
fsstate->fetch_ct_2++;
- /* Must be EOF if we didn't get as many tuples as we asked for. */
- fsstate->eof_reached = (numrows < fsstate->fetch_size);
+ /*
+ * Must be EOF if we got 0 rows. We can get less rows than we asked for
+ * even if there's more data if batch of "fetch_size" exceeds work_mem.
+ */
+ fsstate->eof_reached = (numrows == 0);
PQclear(res);
@@ -5025,6 +5035,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
char fetch_sql[64];
int fetch_size;
ListCell *lc;
+ const char *memory_limited = "";
/* Initialize workspace state */
astate.rel = relation;
@@ -5196,7 +5207,11 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
*/
cursor_number = GetCursorNumber(conn);
initStringInfo(&sql);
- appendStringInfo(&sql, "DECLARE c%u CURSOR FOR ", cursor_number);
+
+ if (server_version_num >= 190000)
+ memory_limited = "MEMORY LIMITED ";
+
+ appendStringInfo(&sql, "DECLARE c%u %sCURSOR FOR ", cursor_number, memory_limited);
deparseAnalyzeSql(&sql, relation, method, sample_frac, &astate.retrieved_attrs);
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index bfd3ebc601e..8295f7639cb 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -64,6 +64,9 @@
#include "utils/snapmgr.h"
+/* max batch for fetching from cursor */
+int cursor_fetch_limit = 4096*1024;
+
/* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
@@ -343,6 +346,7 @@ standard_ExecutorRun(QueryDesc *queryDesc,
* startup tuple receiver, if we will be emitting tuples
*/
estate->es_processed = 0;
+ estate->es_eof = false;
sendTuples = (operation == CMD_SELECT ||
queryDesc->plannedstmt->hasReturning);
@@ -1669,11 +1673,13 @@ ExecutePlan(QueryDesc *queryDesc,
bool use_parallel_mode;
TupleTableSlot *slot;
uint64 current_tuple_count;
+ Size current_batch_size;
/*
* initialize local variables
*/
current_tuple_count = 0;
+ current_batch_size = 0;
/*
* Set the direction.
@@ -1715,7 +1721,10 @@ ExecutePlan(QueryDesc *queryDesc,
* process so we just end the loop...
*/
if (TupIsNull(slot))
+ {
+ estate->es_eof = true;
break;
+ }
/*
* If we have a junk filter, then project a new tuple with the junk
@@ -1740,7 +1749,10 @@ ExecutePlan(QueryDesc *queryDesc,
* end the loop.
*/
if (!dest->receiveSlot(slot, dest))
+ {
+ estate->es_eof = true;
break;
+ }
}
/*
@@ -1757,8 +1769,29 @@ ExecutePlan(QueryDesc *queryDesc,
* means no limit.
*/
current_tuple_count++;
- if (numberTuples && numberTuples == current_tuple_count)
- break;
+ if (numberTuples)
+ {
+ if (numberTuples == current_tuple_count)
+ break;
+
+ /*
+ * If batch limit in bytes is specified, count current batch size
+ * and exit if limit is reached
+ */
+ if (cursor_fetch_limit >= 0 && (estate->es_top_eflags & EXEC_FLAG_MEMORY_LIMITED))
+ {
+ Size tuple_len;
+
+ /* Compute length of the current tuple */
+ slot_getallattrs(slot);
+ tuple_len = estimate_tuple_size(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);
+ current_batch_size += tuple_len;
+
+ if (current_batch_size > cursor_fetch_limit)
+ break;
+ }
+ }
+
}
/*
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index a7955e476f9..7f820388878 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -143,6 +143,7 @@ CreateExecutorState(void)
estate->es_processed = 0;
estate->es_total_processed = 0;
+ estate->es_eof = false;
estate->es_top_eflags = 0;
estate->es_instrument = 0;
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 713ee5c10a2..457c1c456ef 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -750,10 +750,10 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
KEEP KEY KEYS
LABEL LANGUAGE LARGE_P LAST_P LATERAL_P
- LEADING LEAKPROOF LEAST LEFT LEVEL LIKE LIMIT LISTEN LOAD LOCAL
+ LEADING LEAKPROOF LEAST LEFT LEVEL LIKE LIMIT LIMITED LISTEN LOAD LOCAL
LOCALTIME LOCALTIMESTAMP LOCATION LOCK_P LOCKED LOGGED LSN_P
- MAPPING MATCH MATCHED MATERIALIZED MAXVALUE MERGE MERGE_ACTION METHOD
+ MAPPING MATCH MATCHED MATERIALIZED MAXVALUE MEMORY MERGE MERGE_ACTION METHOD
MINUTE_P MINVALUE MODE MONTH_P MOVE
NAME_P NAMES NATIONAL NATURAL NCHAR NESTED NEW NEXT NFC NFD NFKC NFKD NO
@@ -12939,6 +12939,7 @@ cursor_options: /*EMPTY*/ { $$ = 0; }
| cursor_options BINARY { $$ = $1 | CURSOR_OPT_BINARY; }
| cursor_options ASENSITIVE { $$ = $1 | CURSOR_OPT_ASENSITIVE; }
| cursor_options INSENSITIVE { $$ = $1 | CURSOR_OPT_INSENSITIVE; }
+ | cursor_options MEMORY LIMITED { $$ = $1 | CURSOR_OPT_MEMORY_LIMITED; }
;
opt_hold: /* EMPTY */ { $$ = 0; }
@@ -18039,6 +18040,7 @@ unreserved_keyword:
| LAST_P
| LEAKPROOF
| LEVEL
+ | LIMITED
| LISTEN
| LOAD
| LOCAL
@@ -18052,6 +18054,7 @@ unreserved_keyword:
| MATCHED
| MATERIALIZED
| MAXVALUE
+ | MEMORY
| MERGE
| METHOD
| MINUTE_P
@@ -18657,6 +18660,7 @@ bare_label_keyword:
| LEFT
| LEVEL
| LIKE
+ | LIMITED
| LISTEN
| LOAD
| LOCAL
@@ -18672,6 +18676,7 @@ bare_label_keyword:
| MATCHED
| MATERIALIZED
| MAXVALUE
+ | MEMORY
| MERGE
| MERGE_ACTION
| METHOD
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index d8fc75d0bb9..b6995d7e706 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -17,6 +17,7 @@
#include <limits.h>
+#include "access/htup_details.h"
#include "access/xact.h"
#include "commands/prepare.h"
#include "executor/executor.h"
@@ -44,7 +45,7 @@ static void ProcessQuery(PlannedStmt *plan,
QueryCompletion *qc);
static void FillPortalStore(Portal portal, bool isTopLevel);
static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
- DestReceiver *dest);
+ DestReceiver *dest, bool *eof);
static uint64 PortalRunSelect(Portal portal, bool forward, long count,
DestReceiver *dest);
static void PortalRunUtility(Portal portal, PlannedStmt *pstmt,
@@ -507,6 +508,8 @@ PortalStart(Portal portal, ParamListInfo params,
else
myeflags = eflags;
+ if (portal->cursorOptions & CURSOR_OPT_MEMORY_LIMITED)
+ myeflags |= EXEC_FLAG_MEMORY_LIMITED;
/*
* Call ExecutorStart to prepare the plan for execution
*/
@@ -864,6 +867,7 @@ PortalRunSelect(Portal portal,
QueryDesc *queryDesc;
ScanDirection direction;
uint64 nprocessed;
+ bool eof;
/*
* NB: queryDesc will be NULL if we are fetching from a held cursor or a
@@ -909,20 +913,23 @@ PortalRunSelect(Portal portal,
count = 0;
if (portal->holdStore)
- nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
+ {
+ nprocessed = RunFromStore(portal, direction, (uint64) count, dest, &eof);
+ }
else
{
PushActiveSnapshot(queryDesc->snapshot);
ExecutorRun(queryDesc, direction, (uint64) count);
nprocessed = queryDesc->estate->es_processed;
PopActiveSnapshot();
+ eof = queryDesc->estate->es_eof;
}
if (!ScanDirectionIsNoMovement(direction))
{
if (nprocessed > 0)
portal->atStart = false; /* OK to go backward now */
- if (count == 0 || nprocessed < (uint64) count)
+ if (count == 0 || eof)
portal->atEnd = true; /* we retrieved 'em all */
portal->portalPos += nprocessed;
}
@@ -948,13 +955,16 @@ PortalRunSelect(Portal portal,
count = 0;
if (portal->holdStore)
- nprocessed = RunFromStore(portal, direction, (uint64) count, dest);
+ {
+ nprocessed = RunFromStore(portal, direction, (uint64) count, dest, &eof);
+ }
else
{
PushActiveSnapshot(queryDesc->snapshot);
ExecutorRun(queryDesc, direction, (uint64) count);
nprocessed = queryDesc->estate->es_processed;
PopActiveSnapshot();
+ eof = queryDesc->estate->es_eof;
}
if (!ScanDirectionIsNoMovement(direction))
@@ -964,7 +974,7 @@ PortalRunSelect(Portal portal,
portal->atEnd = false; /* OK to go forward now */
portal->portalPos++; /* adjust for endpoint case */
}
- if (count == 0 || nprocessed < (uint64) count)
+ if (count == 0 || eof)
{
portal->atStart = true; /* we retrieved 'em all */
portal->portalPos = 0;
@@ -1049,15 +1059,18 @@ FillPortalStore(Portal portal, bool isTopLevel)
*/
static uint64
RunFromStore(Portal portal, ScanDirection direction, uint64 count,
- DestReceiver *dest)
+ DestReceiver *dest, bool *eof)
{
uint64 current_tuple_count = 0;
+ Size current_batch_size = 0;
TupleTableSlot *slot;
slot = MakeSingleTupleTableSlot(portal->tupDesc, &TTSOpsMinimalTuple);
dest->rStartup(dest, CMD_SELECT, portal->tupDesc);
+ *eof = false;
+
if (ScanDirectionIsNoMovement(direction))
{
/* do nothing except start/stop the destination */
@@ -1079,7 +1092,10 @@ RunFromStore(Portal portal, ScanDirection direction, uint64 count,
MemoryContextSwitchTo(oldcontext);
if (!ok)
+ {
+ *eof = true;
break;
+ }
/*
* If we are not able to send the tuple, we assume the destination
@@ -1087,7 +1103,24 @@ RunFromStore(Portal portal, ScanDirection direction, uint64 count,
* end the loop.
*/
if (!dest->receiveSlot(slot, dest))
+ {
+ *eof = true;
break;
+ }
+
+ /*
+ * If batch limit in bytes is specified, count current batch
+ * size.
+ */
+ if (cursor_fetch_limit > 0 && count && (portal->cursorOptions & CURSOR_OPT_MEMORY_LIMITED))
+ {
+ Size tuple_len;
+
+ /* Compute length of the current tuple. */
+ slot_getallattrs(slot);
+ tuple_len = estimate_tuple_size(slot->tts_tupleDescriptor, slot->tts_values, slot->tts_isnull);
+ current_batch_size += tuple_len;
+ }
ExecClearTuple(slot);
@@ -1097,8 +1130,15 @@ RunFromStore(Portal portal, ScanDirection direction, uint64 count,
* means no limit.
*/
current_tuple_count++;
- if (count && count == current_tuple_count)
- break;
+ if (count)
+ {
+ if (count == current_tuple_count)
+ break;
+
+ /* Exit if batch limit is reached */
+ if (current_batch_size > cursor_fetch_limit)
+ break;
+ }
}
}
diff --git a/src/backend/utils/misc/guc_parameters.dat b/src/backend/utils/misc/guc_parameters.dat
index 7c60b125564..502e965843e 100644
--- a/src/backend/utils/misc/guc_parameters.dat
+++ b/src/backend/utils/misc/guc_parameters.dat
@@ -522,6 +522,15 @@
assign_hook => 'assign_createrole_self_grant',
},
+{ name => 'cursor_fetch_limit', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_MEM',
+ short_desc => 'Sets the maximum size in bytes for fetching from cursor',
+ flags => 'GUC_UNIT_BYTE',
+ variable => 'cursor_fetch_limit',
+ boot_val => '4194304',
+ min => '-1',
+ max => 'MaxAllocSize',
+},
+
{ name => 'cursor_tuple_fraction', type => 'real', context => 'PGC_USERSET', group => 'QUERY_TUNING_OTHER',
short_desc => 'Sets the planner\'s estimate of the fraction of a cursor\'s rows that will be retrieved.',
flags => 'GUC_EXPLAIN',
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index 73ff6ad0a32..bf40d407494 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -50,6 +50,7 @@
#include "commands/vacuum.h"
#include "common/file_utils.h"
#include "common/scram-common.h"
+#include "executor/executor.h"
#include "jit/jit.h"
#include "libpq/auth.h"
#include "libpq/libpq.h"
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index dc9e2255f8a..d0ce6f52e39 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -468,6 +468,7 @@
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
+#cursor_fetch_limit = 4MB # maximum size in bytes for fetching from cursor
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index 5929aabc353..ef6ff711092 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -70,7 +70,7 @@
#define EXEC_FLAG_MARK 0x0010 /* need mark/restore */
#define EXEC_FLAG_SKIP_TRIGGERS 0x0020 /* skip AfterTrigger setup */
#define EXEC_FLAG_WITH_NO_DATA 0x0040 /* REFRESH ... WITH NO DATA */
-
+#define EXEC_FLAG_MEMORY_LIMITED 0x0080 /* memory limited */
/* Hook for plugins to get control in ExecutorStart() */
typedef void (*ExecutorStart_hook_type) (QueryDesc *queryDesc, int eflags);
@@ -96,6 +96,8 @@ typedef bool (*ExecutorCheckPerms_hook_type) (List *rangeTable,
bool ereport_on_violation);
extern PGDLLIMPORT ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook;
+/* GUC variables */
+extern PGDLLIMPORT int cursor_fetch_limit;
/*
* prototypes from functions in execAmi.c
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 3220f9b2574..ecd0edd390c 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -719,6 +719,7 @@ typedef struct EState
* ExecutorRun() call. */
uint64 es_total_processed; /* total # of tuples aggregated across all
* ExecutorRun() calls. */
+ bool es_eof; /* true if no more data to process */
int es_top_eflags; /* eflags passed to ExecutorStart */
int es_instrument; /* OR of InstrumentOption flags */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 646d6ced763..1bf77513bb6 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -3418,6 +3418,7 @@ typedef struct SecLabelStmt
#define CURSOR_OPT_INSENSITIVE 0x0008 /* INSENSITIVE */
#define CURSOR_OPT_ASENSITIVE 0x0010 /* ASENSITIVE */
#define CURSOR_OPT_HOLD 0x0020 /* WITH HOLD */
+#define CURSOR_OPT_MEMORY_LIMITED 0x0040 /* MEMORY LIMITED */
/* these planner-control flags do not correspond to any SQL grammar: */
#define CURSOR_OPT_FAST_PLAN 0x0100 /* prefer fast-start plan */
#define CURSOR_OPT_GENERIC_PLAN 0x0200 /* force use of generic plan */
diff --git a/src/include/parser/kwlist.h b/src/include/parser/kwlist.h
index f7753c5c8a8..0f37469bb21 100644
--- a/src/include/parser/kwlist.h
+++ b/src/include/parser/kwlist.h
@@ -261,6 +261,7 @@ PG_KEYWORD("left", LEFT, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL)
PG_KEYWORD("level", LEVEL, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("like", LIKE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL)
PG_KEYWORD("limit", LIMIT, RESERVED_KEYWORD, AS_LABEL)
+PG_KEYWORD("limited", LIMITED, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("listen", LISTEN, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("load", LOAD, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("local", LOCAL, UNRESERVED_KEYWORD, BARE_LABEL)
@@ -276,6 +277,7 @@ PG_KEYWORD("match", MATCH, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("matched", MATCHED, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("materialized", MATERIALIZED, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("maxvalue", MAXVALUE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("memory", MEMORY, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("merge", MERGE, UNRESERVED_KEYWORD, BARE_LABEL)
PG_KEYWORD("merge_action", MERGE_ACTION, COL_NAME_KEYWORD, BARE_LABEL)
PG_KEYWORD("method", METHOD, UNRESERVED_KEYWORD, BARE_LABEL)
diff --git a/src/test/regress/expected/portals.out b/src/test/regress/expected/portals.out
index 31f77abc446..c1fc04761dc 100644
--- a/src/test/regress/expected/portals.out
+++ b/src/test/regress/expected/portals.out
@@ -1561,3 +1561,108 @@ fetch all in held_portal;
(1 row)
reset default_toast_compression;
+-- Check memory limit for fetching from cursor
+set cursor_fetch_limit = '512B';
+BEGIN;
+DECLARE foo1 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo2 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk2;
+DECLARE foo3 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo4 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk2;
+FETCH 1 in foo1;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx
+(1 row)
+
+FETCH 2 in foo2;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx
+ 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx
+(2 rows)
+
+FETCH 3 in foo3;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx
+ 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx
+ 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx
+(3 rows)
+
+-- Well, we've reached the limit
+FETCH 4 in foo4;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx
+ 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx
+ 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx
+(3 rows)
+
+-- Fetch the next batch
+FETCH 4 in foo4;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx
+ 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx
+ 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx
+(3 rows)
+
+FETCH backward 4 in foo4;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx
+ 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx
+ 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx
+(3 rows)
+
+-- Check cursor with hold
+DECLARE foo5 MEMORY LIMITED CURSOR WITH HOLD FOR SELECT * FROM tenk2 LIMIT 30;
+commit;
+FETCH 5 FROM foo5;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx
+ 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx
+ 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx
+(3 rows)
+
+FETCH 5 FROM foo5;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx
+ 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx
+ 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx
+(3 rows)
+
+-- FETCH ALL is not limited by cursor_fetch_limit
+FETCH ALL FROM foo5;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+ 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx
+ 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx
+ 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx
+ 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx
+ 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx
+ 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx
+ 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx
+ 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx
+ 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx
+ 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx
+ 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx
+ 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx
+ 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx
+ 6969 | 19 | 1 | 1 | 9 | 9 | 69 | 969 | 969 | 1969 | 6969 | 138 | 139 | BIAAAA | TAAAAA | VVVVxx
+ 9460 | 20 | 0 | 0 | 0 | 0 | 60 | 460 | 1460 | 4460 | 9460 | 120 | 121 | WZAAAA | UAAAAA | AAAAxx
+ 59 | 21 | 1 | 3 | 9 | 19 | 59 | 59 | 59 | 59 | 59 | 118 | 119 | HCAAAA | VAAAAA | HHHHxx
+ 8020 | 22 | 0 | 0 | 0 | 0 | 20 | 20 | 20 | 3020 | 8020 | 40 | 41 | MWAAAA | WAAAAA | OOOOxx
+ 7695 | 23 | 1 | 3 | 5 | 15 | 95 | 695 | 1695 | 2695 | 7695 | 190 | 191 | ZJAAAA | XAAAAA | VVVVxx
+ 3442 | 24 | 0 | 2 | 2 | 2 | 42 | 442 | 1442 | 3442 | 3442 | 84 | 85 | KCAAAA | YAAAAA | AAAAxx
+ 5119 | 25 | 1 | 3 | 9 | 19 | 19 | 119 | 1119 | 119 | 5119 | 38 | 39 | XOAAAA | ZAAAAA | HHHHxx
+ 646 | 26 | 0 | 2 | 6 | 6 | 46 | 646 | 646 | 646 | 646 | 92 | 93 | WYAAAA | ABAAAA | OOOOxx
+ 9605 | 27 | 1 | 1 | 5 | 5 | 5 | 605 | 1605 | 4605 | 9605 | 10 | 11 | LFAAAA | BBAAAA | VVVVxx
+ 263 | 28 | 1 | 3 | 3 | 3 | 63 | 263 | 263 | 263 | 263 | 126 | 127 | DKAAAA | CBAAAA | AAAAxx
+ 3269 | 29 | 1 | 1 | 9 | 9 | 69 | 269 | 1269 | 3269 | 3269 | 138 | 139 | TVAAAA | DBAAAA | HHHHxx
+(24 rows)
+
+CLOSE foo5;
+reset cursor_fetch_limit;
diff --git a/src/test/regress/sql/portals.sql b/src/test/regress/sql/portals.sql
index fc4cccb96c0..6d77c89137e 100644
--- a/src/test/regress/sql/portals.sql
+++ b/src/test/regress/sql/portals.sql
@@ -605,3 +605,31 @@ drop table toasted_data;
fetch all in held_portal;
reset default_toast_compression;
+
+-- Check memory limit for fetching from cursor
+set cursor_fetch_limit = '512B';
+
+BEGIN;
+DECLARE foo1 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo2 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk2;
+DECLARE foo3 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2;
+DECLARE foo4 SCROLL MEMORY LIMITED CURSOR FOR SELECT * FROM tenk2;
+
+FETCH 1 in foo1;
+FETCH 2 in foo2;
+FETCH 3 in foo3;
+-- Well, we've reached the limit
+FETCH 4 in foo4;
+-- Fetch the next batch
+FETCH 4 in foo4;
+FETCH backward 4 in foo4;
+
+-- Check cursor with hold
+DECLARE foo5 MEMORY LIMITED CURSOR WITH HOLD FOR SELECT * FROM tenk2 LIMIT 30;
+commit;
+FETCH 5 FROM foo5;
+FETCH 5 FROM foo5;
+-- FETCH ALL is not limited by cursor_fetch_limit
+FETCH ALL FROM foo5;
+CLOSE foo5;
+reset cursor_fetch_limit;
--
2.43.0