From dff9c509856223db78178a0f6fdd1a8b3f0affd7 Mon Sep 17 00:00:00 2001
From: Greg Burd <greg@burd.me>
Date: Tue, 6 May 2025 11:14:19 -0400
Subject: [PATCH v1 2/3] Reverse loop to match counterpart and optimize for CPU
 prefetching.

Walk the line pointer array in heap_page_is_all_visible() in reverse
order so that the loop matches its counterpart in
heap_page_prune_and_freeze(). Because line pointers are allocated from
the start of the page while tuple data is placed backward from the end,
scanning items from maxoff down to FirstOffsetNumber visits the tuple
bodies in increasing address order, which improves CPU prefetching
efficiency and reduces cache misses. It also keeps the two loops
structurally in sync.

Author: Greg Burd <greg@burd.me>
---
 src/backend/access/heap/vacuumlazy.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

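Note (illustration only, not part of the patch): the toy program below
uses made-up constants rather than PostgreSQL APIs to mimic the heap
page layout -- line pointers allocated from the front of the page,
tuple data from the back -- and shows that walking the item array from
maxoff down to FirstOffsetNumber visits the tuple bodies in increasing
address order, the forward-streaming pattern hardware prefetchers
handle best.

    #include <stdio.h>

    #define PAGE_SIZE 8192      /* hypothetical page size */
    #define NITEMS    4         /* hypothetical item count */
    #define TUPLE_SZ  128       /* hypothetical fixed tuple size */

    int
    main(void)
    {
        unsigned tuple_off[NITEMS];
        unsigned pd_upper = PAGE_SIZE;

        /* Simulate insertion: each new tuple is placed below the last. */
        for (int i = 0; i < NITEMS; i++)
        {
            pd_upper -= TUPLE_SZ;
            tuple_off[i] = pd_upper;    /* item i+1 points at pd_upper */
        }

        /* Reverse item order == increasing tuple addresses. */
        for (int i = NITEMS - 1; i >= 0; i--)
            printf("item %d -> tuple at offset %u\n", i + 1, tuple_off[i]);
        return 0;
    }

Running it prints offsets 7680, 7808, 7936, 8064 -- monotonically
increasing, i.e. the access pattern the CPU prefetcher can follow.
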
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index e450911424f..14cf880340b 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -3625,10 +3625,15 @@ heap_page_is_all_visible(LVRelState *vacrel, Buffer buf,
 	*visibility_cutoff_xid = InvalidTransactionId;
 	*all_frozen = true;
 
+	/*
+	 * Processing the items in reverse order (and thus the tuples in
+	 * increasing order) increases prefetching efficiency significantly
+	 * and decreases the number of cache misses.
+	 */
 	maxoff = PageGetMaxOffsetNumber(page);
-	for (offnum = FirstOffsetNumber;
-		 offnum <= maxoff && all_visible;
-		 offnum = OffsetNumberNext(offnum))
+	for (offnum = maxoff;
+		 offnum >= FirstOffsetNumber && all_visible;
+		 offnum = OffsetNumberPrev(offnum))
 	{
 		ItemId		itemid;
 		HeapTupleData tuple;
-- 
2.39.5 (Apple Git-154)

