diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 8837f83..d0463c7 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -67,6 +67,7 @@
 #include "utils/lsyscache.h"
 #include "utils/relcache.h"
 #include "utils/snapmgr.h"
+#include "utils/spccache.h"
 
 
 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
@@ -162,6 +163,26 @@ static const struct
 #define ConditionalLockTupleTuplock(rel, tup, mode) \
 	ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
+#ifdef USE_PREFETCH
+/*
+ * Helper state for prefetching the heap pages referenced by an array of
+ * tids: remembers how far into the sorted array prefetch requests have been
+ * issued, so that prefetching can stay a fixed distance ahead of the actual
+ * buffer reads.
+ */
+typedef struct
+{
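+	/* next element of the sorted tids array to consider */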
+	int		next_item;
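+	/* block number most recently passed to PrefetchBuffer() */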
+	BlockNumber cur_hblkno;
+} PrefetchState;
+
+/*
+ * An arbitrary way to come up with a prefetch distance that grows with io
+ * concurrency, but is at least 10 and not more than the maximum effective io
+ * concurrency.
+ */
+#define PREFETCH_DISTANCE(io_concurrency) Min((io_concurrency) + 10, MAX_IO_CONCURRENCY)
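+
+/*
+ * For example, an io_concurrency of 1 yields a prefetch distance of 11, and
+ * 100 yields 110; the Min() clamp only matters for settings within 10 of
+ * MAX_IO_CONCURRENCY.
+ */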
+
+#endif
+
 /*
  * This table maps tuple lock strength values for each particular
  * MultiXactStatus value.
@@ -6990,6 +7011,44 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
 	/* *latestRemovedXid may still be invalid at end */
 }
 
+#ifdef USE_PREFETCH
+/*
+ * prefetch_buffer
+ *
+ * Issue prefetch requests for up to 'prefetch_count' distinct heap pages,
+ * resuming the scan of the tids array wherever the previous call left off.
+ */
+static void
+prefetch_buffer(Relation rel, PrefetchState *prefetch_state,
+				ItemPointerData *tids, int nitems, int prefetch_count)
+{
+	BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
+	int		count = 0;
+	int		i;
+
+	for (i = prefetch_state->next_item; i < nitems && count < prefetch_count; i++)
+	{
+		ItemPointer htid = &tids[i];
+
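+		/*
+		 * The tids were sorted by the caller, so tids pointing into the same
+		 * heap page are adjacent and each distinct page is prefetched only
+		 * once.
+		 */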
+		if (cur_hblkno == InvalidBlockNumber ||
+			ItemPointerGetBlockNumber(htid) != cur_hblkno)
+		{
+			cur_hblkno = ItemPointerGetBlockNumber(htid);
+			PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
+			count++;
+		}
+	}
+
+	/*
+	 * Save the position so that the next call can resume where this one left
+	 * off.
+	 */
+	prefetch_state->next_item = i;
+	prefetch_state->cur_hblkno = cur_hblkno;
+}
+#endif
+
 /*
  * Get the latestRemovedXid from the heap pages pointed at by the index
  * tuples being deleted.
@@ -7011,6 +7070,10 @@ heap_compute_xid_horizon_for_tuples(Relation rel,
 	BlockNumber hblkno;
 	Buffer		buf = InvalidBuffer;
 	Page		hpage;
+#ifdef USE_PREFETCH
+	PrefetchState prefetch_state;
+	int			io_concurrency;
+#endif
 
 	/*
 	 * Sort to avoid repeated lookups for the same page, and to make it more
@@ -7021,21 +7084,13 @@ heap_compute_xid_horizon_for_tuples(Relation rel,
 	qsort((void *) tids, nitems, sizeof(ItemPointerData),
 		  (int (*) (const void *, const void *)) ItemPointerCompare);
 
-	/* prefetch all pages */
+	/* Start by prefetching a bounded distance ahead of the reads below. */
 #ifdef USE_PREFETCH
-	hblkno = InvalidBlockNumber;
-	for (int i = 0; i < nitems; i++)
-	{
-		ItemPointer htid = &tids[i];
-
-		if (hblkno == InvalidBlockNumber ||
-			ItemPointerGetBlockNumber(htid) != hblkno)
-		{
-			hblkno = ItemPointerGetBlockNumber(htid);
-
-			PrefetchBuffer(rel, MAIN_FORKNUM, hblkno);
-		}
-	}
+	prefetch_state.next_item = 0;
+	prefetch_state.cur_hblkno = InvalidBlockNumber;
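+
+	/*
+	 * Size the prefetch window from the target tablespace's io_concurrency
+	 * (which falls back to the effective_io_concurrency GUC when the
+	 * tablespace doesn't override it).
+	 */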
+	io_concurrency = get_tablespace_io_concurrency(rel->rd_rel->reltablespace);
+	prefetch_buffer(rel, &prefetch_state, tids, nitems,
+					PREFETCH_DISTANCE(io_concurrency));
 #endif
 
 	/* Iterate over all tids, and check their horizon */
@@ -7063,6 +7118,15 @@ heap_compute_xid_horizon_for_tuples(Relation rel,
 			hblkno = ItemPointerGetBlockNumber(htid);
 
 			buf = ReadBuffer(rel, hblkno);
+
+#ifdef USE_PREFETCH
+			/*
+			 * We just consumed a page, so issue one more prefetch request to
+			 * keep the prefetch distance constant.
+			 */
+			prefetch_buffer(rel, &prefetch_state, tids, nitems, 1);
+#endif
+
 			hpage = BufferGetPage(buf);
 
 			LockBuffer(buf, BUFFER_LOCK_SHARE);
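
For readers new to the pattern: the patch keeps a fixed-size window of
prefetch requests in flight ahead of the reads. Below is a minimal standalone
sketch of the same idea against a plain file, using
posix_fadvise(POSIX_FADV_WILLNEED) in place of PostgreSQL's PrefetchBuffer();
the block size, distance, and all names here are illustrative only, not part
of the patch.

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLOCK_SIZE 8192			/* illustrative block size */
#define DISTANCE 16				/* illustrative prefetch distance */

int
main(int argc, char **argv)
{
	char		buf[BLOCK_SIZE];
	off_t		next_prefetch = 0;
	off_t		next_read = 0;
	int			fd;

	if (argc != 2)
	{
		fprintf(stderr, "usage: %s file\n", argv[0]);
		return 1;
	}
	if ((fd = open(argv[1], O_RDONLY)) < 0)
	{
		perror("open");
		return 1;
	}

	/* start the window: request DISTANCE blocks before the first read */
	for (; next_prefetch < DISTANCE; next_prefetch++)
		(void) posix_fadvise(fd, next_prefetch * BLOCK_SIZE, BLOCK_SIZE,
							 POSIX_FADV_WILLNEED);

	/* each read consumes one block, so top the window up by one */
	while (pread(fd, buf, BLOCK_SIZE, next_read * BLOCK_SIZE) > 0)
	{
		(void) posix_fadvise(fd, next_prefetch * BLOCK_SIZE, BLOCK_SIZE,
							 POSIX_FADV_WILLNEED);
		next_prefetch++;
		next_read++;
	}

	close(fd);
	return 0;
}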
