diff --git a/src/backend/access/hash/README b/src/backend/access/hash/README
index 5827389a70..c2c4863c02 100644
--- a/src/backend/access/hash/README
+++ b/src/backend/access/hash/README
@@ -396,8 +396,9 @@ The fourth operation is garbage collection (bulk deletion):
 			mark the target page dirty
 			write WAL for deleting tuples from target page
 			if this is the last bucket page, break out of loop
-			pin and x-lock next page
-			release prior lock and pin (except keep pin on primary bucket page)
+			release lock and pin (except keep pin on primary bucket page)
+			pin and x-lock next page (unless !RelationNeedsWAL, in which case
+				this happens before releasing the prior lock and pin)
 		if the page we have locked is not the primary bucket page:
 			release lock and take exclusive lock on primary bucket page
 		if there are no other pins on the primary bucket page:
@@ -449,8 +450,13 @@ for a scan to start after VACUUM has released the cleanup lock on the bucket
 but before it has processed the entire bucket and then overtake the cleanup
 operation.
 
-Currently, we prevent this using lock chaining: cleanup locks the next page
-in the chain before releasing the lock and pin on the page just processed.
+For temporary and unlogged relations, we prevent this using lock chaining:
+cleanup locks the next page in the chain before releasing the lock and pin
+on the page just processed.  For permanent relations, we use a different
+solution: when a scan is about to kill items, it checks whether the page LSN
+has changed since the page was initially examined and, if so, skips trying
+to kill items.  This is considered a better solution because lock chaining is
+generally undesirable, though it has the downside of postponing cleanup.
 
 Free Space Management
 ---------------------
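
To illustrate the LSN-based approach described in the README hunk above, here is
a minimal, self-contained sketch (not part of the patch).  The Page and ScanPos
types and the helper names are hypothetical simplifications of the scan-position
bookkeeping; only the comparison logic is the point:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t XLogRecPtr;    /* hypothetical stand-in for the WAL position type */

    /* Hypothetical, simplified models of a page header and a saved scan position. */
    typedef struct Page    { XLogRecPtr lsn; } Page;
    typedef struct ScanPos { XLogRecPtr lsn_at_read; } ScanPos;

    /* Remember the page LSN at the time the scan examined the page. */
    static void
    remember_page_lsn(ScanPos *pos, const Page *page)
    {
        pos->lsn_at_read = page->lsn;
    }

    /*
     * Before applying LP_DEAD-style hints, re-check the page LSN.  If vacuum
     * (or any other writer) modified the page since the scan read it, the
     * saved item offsets may be stale, so we skip killing items instead of
     * relying on lock chaining.
     */
    static bool
    can_kill_items(const ScanPos *pos, const Page *page)
    {
        return page->lsn == pos->lsn_at_read;
    }

    int
    main(void)
    {
        Page    page = { .lsn = 100 };
        ScanPos pos;

        remember_page_lsn(&pos, &page);
        page.lsn = 200;             /* vacuum deleted tuples and advanced the LSN */

        printf("safe to kill items? %s\n",
               can_kill_items(&pos, &page) ? "yes" : "no");
        return 0;
    }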
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 0fef60a858..3420775b0a 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -662,8 +662,10 @@ hashvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
  * wake up only after VACUUM has completed and the TID has been recycled for
  * an unrelated tuple.  To avoid that calamity, we prevent scans from passing
  * our cleanup scan by locking the next page in the bucket chain before
- * releasing the lock on the previous page.  (This type of lock chaining is not
- * ideal, so we might want to look for a better solution at some point.)
+ * releasing the lock on the previous page.  However, we only need to worry
+ * about this for temporary and unlogged tables; permanent tables won't have
+ * this problem, because _hash_kill_items() will notice that the page LSN has
+ * changed and skip marking the items dead.
  *
  * We need to retain a pin on the primary bucket to ensure that no concurrent
  * split can start.
@@ -832,18 +834,36 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 		if (!BlockNumberIsValid(blkno))
 			break;
 
-		next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
-											  LH_OVERFLOW_PAGE,
-											  bstrategy);
-
 		/*
-		 * release the lock on previous page after acquiring the lock on next
-		 * page
+		 * Because hash index scans work page at a time, vacuum can release
+		 * the lock on the previous page before acquiring the lock on the
+		 * next page of a WAL-logged table.  For temporary and unlogged
+		 * tables we avoid this, as we do not want a scan to overtake
+		 * vacuum while both are running in the same bucket; this keeps the
+		 * dead-marking of index tuples in _hash_kill_items() safe.
 		 */
-		if (retain_pin)
-			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+		if (RelationNeedsWAL(rel))
+		{
+			if (retain_pin)
+				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+			else
+				_hash_relbuf(rel, buf);
+
+			next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
+												  LH_OVERFLOW_PAGE,
+												  bstrategy);
+		}
 		else
-			_hash_relbuf(rel, buf);
+		{
+			next_buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
+												  LH_OVERFLOW_PAGE,
+												  bstrategy);
+
+			if (retain_pin)
+				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+			else
+				_hash_relbuf(rel, buf);
+		}
 
 		buf = next_buf;
 	}
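
The restructured loop above comes down to a choice of lock ordering.  The
following minimal, self-contained sketch models just that ordering;
lock_page()/unlock_page() and the needs_wal flag are hypothetical stand-ins for
the buffer-manager calls and RelationNeedsWAL():

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for locking/unlocking a page of the bucket chain. */
    static void lock_page(int blkno)   { printf("lock   page %d\n", blkno); }
    static void unlock_page(int blkno) { printf("unlock page %d\n", blkno); }

    /*
     * Advance vacuum from the current bucket-chain page to the next one.  For
     * WAL-logged relations the previous page can be released first, because a
     * concurrent scan will notice the changed page LSN and skip killing items.
     * Without WAL (temporary/unlogged relations) the next page must be locked
     * before the previous one is released, so a scan cannot overtake vacuum.
     */
    static void
    advance_to_next_page(int cur_blkno, int next_blkno, bool needs_wal)
    {
        if (needs_wal)
        {
            unlock_page(cur_blkno);     /* release the previous page ... */
            lock_page(next_blkno);      /* ... then acquire the next one */
        }
        else
        {
            lock_page(next_blkno);      /* lock chaining: acquire the next page ... */
            unlock_page(cur_blkno);     /* ... before releasing the previous one */
        }
    }

    int
    main(void)
    {
        advance_to_next_page(10, 11, true);     /* permanent relation */
        advance_to_next_page(10, 11, false);    /* temporary/unlogged relation */
        return 0;
    }

In this sketch the else branch is what the README calls lock chaining; the if
branch is safe only because of the LSN check sketched earlier.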
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index c206e704d4..b41afbb416 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -524,7 +524,7 @@ _hash_freeovflpage(Relation rel, Buffer bucketbuf, Buffer ovflbuf,
 	 * Fix up the bucket chain.  this is a doubly-linked list, so we must fix
 	 * up the bucket chain members behind and ahead of the overflow page being
 	 * deleted.  Concurrency issues are avoided by using lock chaining as
-	 * described atop hashbucketcleanup.
+	 * described atop _hash_squeezebucket.
 	 */
 	if (BlockNumberIsValid(prevblkno))
 	{
@@ -790,9 +790,14 @@ _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)
  *	Caller must acquire cleanup lock on the primary page of the target
  *	bucket to exclude any scans that are in progress, which could easily
  *	be confused into returning the same tuple more than once or some tuples
- *	not at all by the rearrangement we are performing here.  To prevent
- *	any concurrent scan to cross the squeeze scan we use lock chaining
- *	similar to hasbucketcleanup.  Refer comments atop hashbucketcleanup.
+ *	not at all by the rearrangement we are performing here.  This means there
+ *	can't be any concurrent scans in progress when we first enter this
+ *	function because of the cleanup lock we hold on the primary bucket page,
+ *	but as soon as we release that lock, there might be.  To prevent any
+ *	concurrent scan from crossing the squeeze scan, we use lock chaining:
+ *	we lock the next page in the bucket chain before releasing the lock on
+ *	the previous page.  (This type of lock chaining is not ideal, so we
+ *	might want to look for a better solution at some point.)
  *
  *	We need to retain a pin on the primary bucket to ensure that no concurrent
  *	split can start.
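
The chain fix-up referenced atop _hash_freeovflpage(), performed under the lock
chaining described above, is essentially a doubly-linked-list unlink of the
overflow page.  The sketch below is a minimal, self-contained model; the
ChainPage struct and its fields are hypothetical stand-ins for the
prevblkno/nextblkno links in the hash page opaque data:

    #include <stdio.h>

    #define INVALID_BLKNO (-1)      /* hypothetical stand-in for InvalidBlockNumber */

    /* Hypothetical, simplified model of one page in a bucket's overflow chain. */
    typedef struct ChainPage
    {
        int prevblkno;              /* previous page in the chain, or INVALID_BLKNO */
        int nextblkno;              /* next page in the chain, or INVALID_BLKNO */
    } ChainPage;

    /*
     * Unlink page "victim" from the chain by repointing its neighbours, the
     * way _hash_freeovflpage() fixes up the pages behind and ahead of the
     * overflow page being deleted.  In the real code each neighbour is locked
     * (with lock chaining) before it is modified.
     */
    static void
    unlink_overflow_page(ChainPage *pages, int victim)
    {
        int prev = pages[victim].prevblkno;
        int next = pages[victim].nextblkno;

        if (prev != INVALID_BLKNO)
            pages[prev].nextblkno = next;   /* page behind now points past the victim */
        if (next != INVALID_BLKNO)
            pages[next].prevblkno = prev;   /* page ahead now points back past it */
    }

    int
    main(void)
    {
        /* chain: 0 <-> 1 <-> 2 */
        ChainPage pages[3] = {
            {INVALID_BLKNO, 1},
            {0, 2},
            {1, INVALID_BLKNO},
        };

        unlink_overflow_page(pages, 1);
        printf("page 0 next = %d, page 2 prev = %d\n",
               pages[0].nextblkno, pages[2].prevblkno);
        return 0;
    }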
