diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index d64df31..d3f11f5 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -300,7 +300,10 @@ index_beginscan_internal(Relation indexRelation,
 									  PointerGetDatum(indexRelation),
 									  Int32GetDatum(nkeys),
 									  Int32GetDatum(norderbys)));
-
+
+	scan->heap_tids_seen = 0;
+	scan->heap_tids_fetched = 0;
+
 	return scan;
 }
 
@@ -469,6 +472,11 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
 	}
 
 	pgstat_count_index_tuples(scan->indexRelation, 1);
+	if (++scan->heap_tids_seen == 0) {
+		/* Counter wrapped around; restart stats to avoid overflow skew */
+		scan->heap_tids_seen = 1;
+		scan->heap_tids_fetched = 0;
+	}
 
 	/* Return the TID of the tuple we found. */
 	return &scan->xs_ctup.t_self;
@@ -508,6 +516,7 @@ index_fetch_heap(IndexScanDesc scan)
 		scan->xs_cbuf = ReleaseAndReadBuffer(scan->xs_cbuf,
 											 scan->heapRelation,
 											 ItemPointerGetBlockNumber(tid));
+		scan->heap_tids_fetched++;
 
 		/*
 		 * Prune page, but only if we weren't already on this page
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 016ce22..1115afc 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -487,6 +487,19 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
 }
 
 /*
+ *	_bt_prefetchbuf() -- Prefetch a buffer by block number
+ */
+void
+_bt_prefetchbuf(Relation rel, BlockNumber blkno)
+{
+	if (blkno != P_NEW && blkno != P_NONE)
+	{
+		/* Just prefetch an existing block of the relation */
+		PrefetchBuffer(rel, MAIN_FORKNUM, blkno);
+	}
+}
+
+/*
  *	_bt_getbuf() -- Get a buffer by block number for read or write.
  *
  *		blkno == P_NEW means to get an unallocated index page.	The page
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index b055eaf..bb60588 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -432,6 +432,12 @@ btbeginscan(PG_FUNCTION_ARGS)
 	so->killedItems = NULL;		/* until needed */
 	so->numKilled = 0;
 
+	so->backSeqRun = 0;
+	so->backSeqPos = 0;
+	so->prefetchItemIndex = 0;
+	so->lastHeapPrefetchBlkno = P_NONE;
+	so->prefetchBlockCount = 0;
+
 	/*
 	 * We don't know yet whether the scan will be index-only, so we do not
 	 * allocate the tuple workspace arrays until btrescan.	However, we set up
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index e0c9523..21c492e 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -28,7 +28,8 @@ static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
 			 OffsetNumber offnum);
 static void _bt_saveitem(BTScanOpaque so, int itemIndex,
 			 OffsetNumber offnum, IndexTuple itup);
-static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir);
+static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir,
+			 bool prefetch);
 static Buffer _bt_walk_left(Relation rel, Buffer buf);
 static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
 
@@ -961,7 +962,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 		 * There's no actually-matching data on this page.  Try to advance to
 		 * the next page.  Return false if there's no matching data at all.
 		 */
-		if (!_bt_steppage(scan, dir))
+		if (!_bt_steppage(scan, dir, false))
 			return false;
 	}
 
@@ -996,6 +997,8 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
 {
 	BTScanOpaque so = (BTScanOpaque) scan->opaque;
 	BTScanPosItem *currItem;
+	BlockNumber prevblkno = ItemPointerGetBlockNumber(
+		&scan->xs_ctup.t_self);
 
 	/*
 	 * Advance to next tuple on current page; or if there's no more, try to
@@ -1008,11 +1011,51 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
 			/* We must acquire lock before applying _bt_steppage */
 			Assert(BufferIsValid(so->currPos.buf));
 			LockBuffer(so->currPos.buf, BT_READ);
-			if (!_bt_steppage(scan, dir))
+			if (!_bt_steppage(scan, dir, target_prefetch_pages > 0))
 				return false;
 			/* Drop the lock, but not pin, on the new page */
 			LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
 		}
+
+		if (target_prefetch_pages > 0) {
+			BlockNumber currblkno = ItemPointerGetBlockNumber(
+				&so->currPos.items[so->currPos.itemIndex].heapTid);
+		
+			if (currblkno != prevblkno) {
+				if (so->prefetchBlockCount > 0)
+					so->prefetchBlockCount--;
+			
+				/* If we have heap fetch frequency stats, and it's above ~94%,
+				 * initiate heap prefetches */
+				if (so->currPos.moreRight
+					&& scan->heap_tids_seen > 256 
+					&& ( (scan->heap_tids_seen - scan->heap_tids_seen/16)
+						 <= scan->heap_tids_fetched ) )
+				{
+					bool nonsequential = false;
+				
+					if (so->prefetchItemIndex <= so->currPos.itemIndex)
+						so->prefetchItemIndex = so->currPos.itemIndex + 1;
+					while (    (so->prefetchItemIndex <= so->currPos.lastItem)
+							&& (so->prefetchBlockCount < target_prefetch_pages) )
+					{
+						ItemPointer tid = &so->currPos.items[so->prefetchItemIndex++].heapTid;
+						BlockNumber blkno = ItemPointerGetBlockNumber(tid);
+						if (blkno != so->lastHeapPrefetchBlkno) {
+							/* start prefetch on next page, but not
+							   if we're reading sequentially already,
+							   as it's counterproductive in those cases */
+							if (nonsequential || blkno != (so->lastHeapPrefetchBlkno+1)) {
+								_bt_prefetchbuf(scan->heapRelation, blkno );
+								nonsequential = true;
+							}
+							so->lastHeapPrefetchBlkno = blkno;
+							so->prefetchBlockCount++;
+						}
+					}
+				}
+			}
+		}
 	}
 	else
 	{
@@ -1021,11 +1064,51 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
 			/* We must acquire lock before applying _bt_steppage */
 			Assert(BufferIsValid(so->currPos.buf));
 			LockBuffer(so->currPos.buf, BT_READ);
-			if (!_bt_steppage(scan, dir))
+			if (!_bt_steppage(scan, dir, target_prefetch_pages > 0))
 				return false;
 			/* Drop the lock, but not pin, on the new page */
 			LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
 		}
+
+		if (target_prefetch_pages > 0) {
+			BlockNumber currblkno = ItemPointerGetBlockNumber(
+				&so->currPos.items[so->currPos.itemIndex].heapTid);
+		
+			if (currblkno != prevblkno) {
+				if (so->prefetchBlockCount > 0)
+					so->prefetchBlockCount--;
+			
+				/* If we have heap fetch frequency stats, and it's above ~94%,
+				 * initiate heap prefetches */
+				if (so->currPos.moreLeft
+					&& scan->heap_tids_seen > 256 
+					&& ( (scan->heap_tids_seen - scan->heap_tids_seen/16)
+						 <= scan->heap_tids_fetched ) )
+				{
+					bool nonsequential = false;
+			
+					if (so->prefetchItemIndex >= so->currPos.itemIndex)
+						so->prefetchItemIndex = so->currPos.itemIndex - 1;
+					while (    (so->prefetchItemIndex >= so->currPos.firstItem)
+							&& (so->prefetchBlockCount < target_prefetch_pages) )
+					{
+						ItemPointer tid = &so->currPos.items[so->prefetchItemIndex--].heapTid;
+						BlockNumber blkno = ItemPointerGetBlockNumber(tid);
+						if (blkno != so->lastHeapPrefetchBlkno) {
+							/* Prefetch the next needed heap block; skip
+							   ascending runs (OS readahead covers those).
+							   XXX confirm the +1 test suits backward scans */
+							if (nonsequential || blkno != (so->lastHeapPrefetchBlkno+1)) {
+								_bt_prefetchbuf(scan->heapRelation, blkno );
+								nonsequential = true;
+							}
+							so->lastHeapPrefetchBlkno = blkno;
+							so->prefetchBlockCount++;
+						}
+					}
+				}
+			}
+		}
 	}
 
 	/* OK, itemIndex says what to return */
@@ -1075,9 +1158,11 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
 	/*
 	 * we must save the page's right-link while scanning it; this tells us
 	 * where to step right to after we're done with these items.  There is no
-	 * corresponding need for the left-link, since splits always go right.
+	 * corresponding need for the left-link, since splits always go right,
+	 * but we need it for back-sequential scan detection.
 	 */
 	so->currPos.nextPage = opaque->btpo_next;
+	so->currPos.prevPage = opaque->btpo_prev;
 
 	/* initialize tuple workspace to empty */
 	so->currPos.nextTupleOffset = 0;
@@ -1112,6 +1197,7 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
 		so->currPos.firstItem = 0;
 		so->currPos.lastItem = itemIndex - 1;
 		so->currPos.itemIndex = 0;
+		so->prefetchItemIndex = 0;
 	}
 	else
 	{
@@ -1143,6 +1229,7 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum)
 		so->currPos.firstItem = itemIndex;
 		so->currPos.lastItem = MaxIndexTuplesPerPage - 1;
 		so->currPos.itemIndex = MaxIndexTuplesPerPage - 1;
+		so->prefetchItemIndex = MaxIndexTuplesPerPage - 1;
 	}
 
 	return (so->currPos.firstItem <= so->currPos.lastItem);
@@ -1180,7 +1267,7 @@ _bt_saveitem(BTScanOpaque so, int itemIndex,
  * locks and pins, set so->currPos.buf to InvalidBuffer, and return FALSE.
  */
 static bool
-_bt_steppage(IndexScanDesc scan, ScanDirection dir)
+_bt_steppage(IndexScanDesc scan, ScanDirection dir, bool prefetch)
 {
 	BTScanOpaque so = (BTScanOpaque) scan->opaque;
 	Relation	rel;
@@ -1243,8 +1330,17 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
 				PredicateLockPage(rel, blkno, scan->xs_snapshot);
 				/* see if there are any matches on this page */
 				/* note that this will clear moreRight if we can stop */
-				if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque)))
+				if (_bt_readpage(scan, dir, P_FIRSTDATAKEY(opaque))) {
+					if (prefetch && so->currPos.moreRight 
+							&& (opaque->btpo_next != (blkno+1))) 
+					{
+						/* start prefetch on next page, but not
+						   if we're reading sequentially already,
+						   as it's counterproductive in those cases */
+						_bt_prefetchbuf(rel, opaque->btpo_next);
+					}
 					break;
+				}
 			}
 			/* nope, keep going */
 			blkno = opaque->btpo_next;
@@ -1288,11 +1384,55 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
 			opaque = (BTPageOpaque) PageGetSpecialPointer(page);
 			if (!P_IGNORE(opaque))
 			{
+				/* We must rely on the previously saved prevPage link! */
+				BlockNumber blkno = so->currPos.prevPage;
+				
 				PredicateLockPage(rel, BufferGetBlockNumber(so->currPos.buf), scan->xs_snapshot);
 				/* see if there are any matches on this page */
 				/* note that this will clear moreLeft if we can stop */
-				if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page)))
+				if (_bt_readpage(scan, dir, PageGetMaxOffsetNumber(page))) {
+					if (prefetch && so->currPos.moreLeft) {
+						/* detect back-sequential runs and increase prefetch window blindly 
+						 * downwards 2 blocks at a time. This only works in our favor
+						 * for index-only scans, by merging read requests at the kernel,
+						 * so we want to inflate target_prefetch_pages since merged 
+						 * back-sequential requests are about as expensive as a single one 
+						 */
+						if (scan->xs_want_itup && blkno > 0 && opaque->btpo_prev == (blkno-1)) {
+							BlockNumber backPos;
+							unsigned int back_prefetch_pages = target_prefetch_pages * 16;
+							if (back_prefetch_pages > 64)
+								back_prefetch_pages = 64;
+							
+							if (so->backSeqRun == 0)
+								backPos = (blkno-1);
+							else
+								backPos = so->backSeqPos;
+							so->backSeqRun++;
+							
+							if (backPos > 0 && (blkno - backPos) <= back_prefetch_pages) {
+								_bt_prefetchbuf(rel, backPos--);
+								/* don't start back-seq prefetch too early */
+								if (so->backSeqRun >= back_prefetch_pages
+										&& backPos > 0 
+										&& (blkno - backPos) <= back_prefetch_pages)
+								{
+									_bt_prefetchbuf(rel, backPos--);
+								}
+							}
+							
+							so->backSeqPos = backPos;
+						} else {
+							/* start prefetch on next page */
+							if (so->backSeqRun != 0) {
+								if (opaque->btpo_prev > blkno || opaque->btpo_prev < so->backSeqPos)
+									so->backSeqRun = 0;
+							}
+							_bt_prefetchbuf(rel, opaque->btpo_prev);
+						}
+					}
 					break;
+				}
 			}
 		}
 	}
@@ -1587,7 +1727,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
 		 * There's no actually-matching data on this page.  Try to advance to
 		 * the next page.  Return false if there's no matching data at all.
 		 */
-		if (!_bt_steppage(scan, dir))
+		if (!_bt_steppage(scan, dir, false))
 			return false;
 	}
 
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index d4941e0..b2a4eea 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -496,6 +496,7 @@ typedef struct BTScanPosData
 	Buffer		buf;			/* if valid, the buffer is pinned */
 
 	BlockNumber nextPage;		/* page's right link when we scanned it */
+	BlockNumber prevPage;		/* page's left link when we scanned it */
 
 	/*
 	 * moreLeft and moreRight track whether we think there may be matching
@@ -575,6 +576,15 @@ typedef struct BTScanOpaqueData
 	 */
 	int			markItemIndex;	/* itemIndex, or -1 if not valid */
 
+	/* prefetch logic state */
+	unsigned int	backSeqRun;	/* number of back-sequential pages in a run */
+	BlockNumber		backSeqPos;	/* blkid last prefetched in back-sequential 
+				          		   runs */
+	BlockNumber		lastHeapPrefetchBlkno;	/* blkid last prefetched from heap */
+	int				prefetchItemIndex; /* item index within currPos last
+					                      fetched by heap prefetch */
+	int				prefetchBlockCount; /* number of prefetched heap blocks */
+
 	/* keep these last in struct for efficiency */
 	BTScanPosData currPos;		/* current position data */
 	BTScanPosData markPos;		/* marked position, if any */
@@ -628,6 +638,7 @@ extern Buffer _bt_getroot(Relation rel, int access);
 extern Buffer _bt_gettrueroot(Relation rel);
 extern void _bt_checkpage(Relation rel, Buffer buf);
 extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
+extern void _bt_prefetchbuf(Relation rel, BlockNumber blkno);
 extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
 				 BlockNumber blkno, int access);
 extern void _bt_relbuf(Relation rel, Buffer buf);
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 87acc8e..3343fda 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -90,6 +90,10 @@ typedef struct IndexScanDescData
 	/* NB: if xs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
 	bool		xs_recheck;		/* T means scan keys must be rechecked */
 
+	/* heap-fetch statistics driving index-scan heap prefetch heuristics */
+	uint32		heap_tids_seen;		/* TIDs returned by the index AM */
+	uint32		heap_tids_fetched;	/* TIDs for which a heap fetch was started */
+
 	/* state data for traversing HOT chains in index_getnext */
 	bool		xs_continue_hot;	/* T if must keep walking HOT chain */
 }	IndexScanDescData;
