diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 6806e32..a8c446c 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -523,7 +523,8 @@ hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	orig_maxbucket = metap->hashm_maxbucket;
 	orig_ntuples = metap->hashm_ntuples;
 	memcpy(&local_metapage, metap, sizeof(local_metapage));
-	_hash_relbuf(rel, metabuf);
+	/* release the lock, but keep pin */
+	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 
 	/* Scan the buckets that we know exist */
 	cur_bucket = 0;
@@ -563,8 +564,21 @@ loop_top:
 		 */
 		if (!H_BUCKET_BEING_SPLIT(bucket_opaque) &&
 			H_NEEDS_SPLIT_CLEANUP(bucket_opaque))
+		{
 			split_cleanup = true;
 
+			/*
+			 * To perform split cleanup, refresh the metapage values.  This
+			 * ensures that hashm_maxbucket, hashm_highmask and hashm_lowmask
+			 * correspond to the latest split of the bucket; otherwise,
+			 * cleanup will fail to remove tuples that were moved by the
+			 * latest split.
+			 */
+			_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
+			memcpy(&local_metapage, metap, sizeof(local_metapage));
+			_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
+		}
+
 		bucket_buf = buf;
 
 		hashbucketcleanup(rel, cur_bucket, bucket_buf, blkno, info->strategy,
@@ -581,7 +595,7 @@ loop_top:
 	}
 
 	/* Write-lock metapage and check for split since we started */
-	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
+	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
 	metap = HashPageGetMeta(BufferGetPage(metabuf));
 
 	if (cur_maxbucket != metap->hashm_maxbucket)
@@ -589,7 +603,7 @@ loop_top:
 		/* There's been a split, so process the additional bucket(s) */
 		cur_maxbucket = metap->hashm_maxbucket;
 		memcpy(&local_metapage, metap, sizeof(local_metapage));
-		_hash_relbuf(rel, metabuf);
+		_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 		goto loop_top;
 	}
 
@@ -689,6 +703,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 	Buffer		buf;
 	Bucket new_bucket PG_USED_FOR_ASSERTS_ONLY = InvalidBucket;
 	bool		bucket_dirty = false;
+	bool		curr_page_dirty;
 
 	blkno = bucket_blkno;
 	buf = bucket_buf;
@@ -708,7 +723,8 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 		OffsetNumber deletable[MaxOffsetNumber];
 		int			ndeletable = 0;
 		bool		retain_pin = false;
-		bool		curr_page_dirty = false;
+
+		curr_page_dirty = false;
 
 		vacuum_delay_point();
 
@@ -827,7 +843,10 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 	 */
 	if (buf != bucket_buf)
 	{
-		_hash_relbuf(rel, buf);
+		if (curr_page_dirty)
+			_hash_wrtbuf(rel, buf);
+		else
+			_hash_relbuf(rel, buf);
 		_hash_chgbufaccess(rel, bucket_buf, HASH_NOLOCK, HASH_WRITE);
 	}
 
@@ -849,6 +868,16 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 	}
 
 	/*
+	 * We need to release and reacquire the lock on the bucket buffer to
+	 * ensure that a standby doesn't see an intermediate state of it.  This
+	 * will mainly be required once hash indexes are WAL-logged, but even
+	 * without that it simplifies the code, since otherwise we would need to
+	 * pass the dirty state of the bucket buffer down to _hash_squeezebucket.
+	 */
+	_hash_chgbufaccess(rel, bucket_buf, HASH_WRITE, HASH_NOLOCK);
+	_hash_chgbufaccess(rel, bucket_buf, HASH_NOLOCK, HASH_WRITE);
+
+	/*
 	 * If we have deleted anything, try to compact free space.  For squeezing
 	 * the bucket, we must have a cleanup lock, else it can impact the
 	 * ordering of tuples for a scan that has started before it.
@@ -857,7 +886,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
 		_hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
 							bstrategy);
 	else
-		_hash_chgbufaccess(rel, bucket_buf, HASH_WRITE, HASH_NOLOCK);
+		_hash_chgbufaccess(rel, bucket_buf, HASH_READ, HASH_NOLOCK);
 }
 
 void
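
For context, the hashbulkdelete changes above switch from releasing and re-reading the metapage to a pin-retaining lock handoff.  Below is a minimal sketch of that pattern, not part of the patch; it uses only the pre-10 hash AM helpers the diff already relies on (_hash_getbuf, _hash_chgbufaccess, _hash_relbuf), while the wrapper function and its name are illustrative.

#include "postgres.h"
#include "access/hash.h"

/* Illustrative only: copy the metapage, keep the pin, re-lock it later. */
static void
copy_metapage_keeping_pin(Relation rel, HashMetaPageData *local_metapage)
{
	Buffer		metabuf;
	HashMetaPage metap;

	/* pin and read-lock the metapage */
	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
	metap = HashPageGetMeta(BufferGetPage(metabuf));

	/* take a private copy of the fields we need */
	memcpy(local_metapage, metap, sizeof(*local_metapage));

	/* release the lock, but keep the pin */
	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

	/* ... per-bucket work happens here without the metapage lock ... */

	/* write-lock the still-pinned metapage to check for a concurrent split */
	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
	if (metap->hashm_maxbucket != local_metapage->hashm_maxbucket)
	{
		/* a split happened meanwhile; the caller would rescan new buckets */
	}

	/* release both the lock and the pin */
	_hash_relbuf(rel, metabuf);
}
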
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index e2d208e..cc922a9 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -369,8 +369,8 @@ _hash_firstfreebit(uint32 map)
  *	Since this function is invoked in VACUUM, we provide an access strategy
  *	parameter that controls fetches of the bucket pages.
  *
- *	Returns the block number of the page that followed the given page
- *	in the bucket, or InvalidBlockNumber if no following page.
+ *	Returns the buffer that follows the given wbuf in the bucket chain, or
+ *	InvalidBuffer if there is no following page.
  *
  *	NB: caller must not hold lock on metapage, nor on page, that's next to
  *	ovflbuf in the bucket chain.  We don't acquire the lock on page that's
@@ -378,7 +378,7 @@ _hash_firstfreebit(uint32 map)
  *	has a lock on same.  This function releases the lock on wbuf and caller
  *	is responsible for releasing the pin on same.
  */
-BlockNumber
+Buffer
 _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 				   bool wbuf_dirty, BufferAccessStrategy bstrategy)
 {
@@ -386,14 +386,17 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 	Buffer		metabuf;
 	Buffer		mapbuf;
 	Buffer		prevbuf = InvalidBuffer;
+	Buffer		next_wbuf = InvalidBuffer;
 	BlockNumber ovflblkno;
 	BlockNumber prevblkno;
 	BlockNumber blkno;
 	BlockNumber nextblkno;
 	BlockNumber writeblkno;
 	HashPageOpaque ovflopaque;
+	HashPageOpaque wopaque;
 	Page		ovflpage;
 	Page		mappage;
+	Page		wpage;
 	uint32	   *freep;
 	uint32		ovflbitno;
 	int32		bitmappage,
@@ -446,14 +449,13 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 
 		if (prevblkno != writeblkno)
 			_hash_wrtbuf(rel, prevbuf);
+		else
+		{
+			/* prevblkno == writeblkno, so ensure wbuf gets marked dirty */
+			wbuf_dirty = true;
+		}
 	}
 
-	/* write and unlock the write buffer */
-	if (wbuf_dirty)
-		_hash_chgbufaccess(rel, wbuf, HASH_WRITE, HASH_NOLOCK);
-	else
-		_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
-
 	if (BlockNumberIsValid(nextblkno))
 	{
 		Buffer		nextbuf = _hash_getbuf_with_strategy(rel,
@@ -469,6 +471,38 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 		_hash_wrtbuf(rel, nextbuf);
 	}
 
+	/*
+	 * To maintain the lock chaining described atop hashbucketcleanup, we
+	 * must lock the next buffer in the bucket chain before releasing the
+	 * current one.  This is required only if the next overflow page to read
+	 * from is not the same as the page we need to write to.
+	 *
+	 * XXX Here we move on to the next overflow page for writing without
+	 * checking whether the previous write page is full.  This is annoying,
+	 * but should not hurt much in practice, as that space will be consumed
+	 * by future inserts anyway.
+	 */
+	if (prevblkno != writeblkno)
+	{
+		wpage = BufferGetPage(wbuf);
+		wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
+		Assert(wopaque->hasho_bucket == bucket);
+		writeblkno = wopaque->hasho_nextblkno;
+
+		if (BlockNumberIsValid(writeblkno))
+			next_wbuf = _hash_getbuf_with_strategy(rel,
+												   writeblkno,
+												   HASH_WRITE,
+												   LH_OVERFLOW_PAGE,
+												   bstrategy);
+	}
+
+	/* write and unlock the write buffer */
+	if (wbuf_dirty)
+		_hash_chgbufaccess(rel, wbuf, HASH_WRITE, HASH_NOLOCK);
+	else
+		_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
+
 	/* Note: bstrategy is intentionally not used for metapage and bitmap */
 
 	/* Read the metapage so we can determine which bitmap page to use */
@@ -511,7 +545,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 		_hash_relbuf(rel, metabuf);
 	}
 
-	return nextblkno;
+	return next_wbuf;
 }
 
 
@@ -676,6 +710,7 @@ _hash_squeezebucket(Relation rel,
 		OffsetNumber deletable[MaxOffsetNumber];
 		int			ndeletable = 0;
 		bool		retain_pin = false;
+		Buffer		next_wbuf = InvalidBuffer;
 
 		/* Scan each tuple in "read" page */
 		maxroffnum = PageGetMaxOffsetNumber(rpage);
@@ -701,8 +736,6 @@ _hash_squeezebucket(Relation rel,
 			 */
 			while (PageGetFreeSpace(wpage) < itemsz)
 			{
-				Buffer		next_wbuf = InvalidBuffer;
-
 				Assert(!PageIsEmpty(wpage));
 
 				if (wblkno == bucket_blkno)
@@ -789,19 +822,29 @@ _hash_squeezebucket(Relation rel,
 		Assert(BlockNumberIsValid(rblkno));
 
 		/* free this overflow page (releases rbuf) */
-		_hash_freeovflpage(rel, rbuf, wbuf, wbuf_dirty, bstrategy);
+		next_wbuf = _hash_freeovflpage(rel, rbuf, wbuf, wbuf_dirty, bstrategy);
+
+		/* retain the pin on primary bucket page till end of bucket scan */
+		if (wblkno != bucket_blkno)
+			_hash_dropbuf(rel, wbuf);
 
 		/* are we freeing the page adjacent to wbuf? */
 		if (rblkno == wblkno)
+			return;
+
+		/* are we freeing the page adjacent to next_wbuf? */
+		if (BufferIsValid(next_wbuf) &&
+			rblkno == BufferGetBlockNumber(next_wbuf))
 		{
-			/* retain the pin on primary bucket page till end of bucket scan */
-			if (wblkno != bucket_blkno)
-				_hash_dropbuf(rel, wbuf);
+			_hash_relbuf(rel, next_wbuf);
 			return;
 		}
 
-		/* lock the overflow page being written, then get the previous one */
-		_hash_chgbufaccess(rel, wbuf, HASH_NOLOCK, HASH_WRITE);
+		wbuf = next_wbuf;
+		wblkno = BufferGetBlockNumber(wbuf);
+		wpage = BufferGetPage(wbuf);
+		wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
+		Assert(wopaque->hasho_bucket == bucket);
 
 		rbuf = _hash_getbuf_with_strategy(rel,
 										  rblkno,
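
Because _hash_freeovflpage now returns a Buffer rather than a BlockNumber, the next write page comes back already pinned and write-locked, acquired inside _hash_freeovflpage before the lock on wbuf is dropped, so the lock chaining is preserved.  The following condensed sketch of the resulting calling convention is not part of the patch; only the hash AM helpers are taken as given, the wrapper is illustrative, and the end-of-chain edge cases handled in _hash_squeezebucket above are omitted for brevity.

#include "postgres.h"
#include "access/hash.h"

/*
 * Illustrative only: advance the "write" end of a squeeze-style scan using
 * the Buffer returned by _hash_freeovflpage().  Returns true if there is
 * another page to write into, false if the write chain ended.
 */
static bool
advance_write_page(Relation rel, Buffer rbuf, Buffer *wbuf,
				   BlockNumber *wblkno, BlockNumber bucket_blkno,
				   bool wbuf_dirty, BufferAccessStrategy bstrategy)
{
	Buffer		next_wbuf;

	/*
	 * Free the current "read" overflow page.  This releases rbuf, drops the
	 * lock on *wbuf, and hands back the next page to write into, pinned and
	 * write-locked, or InvalidBuffer if there is no such page.
	 */
	next_wbuf = _hash_freeovflpage(rel, rbuf, *wbuf, wbuf_dirty, bstrategy);

	/* keep the pin on the primary bucket page until the bucket scan ends */
	if (*wblkno != bucket_blkno)
		_hash_dropbuf(rel, *wbuf);

	if (!BufferIsValid(next_wbuf))
		return false;

	/* continue the squeeze with the page _hash_freeovflpage handed back */
	*wbuf = next_wbuf;
	*wblkno = BufferGetBlockNumber(*wbuf);
	return true;
}
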
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 6dfc41f..bc63719 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -313,7 +313,7 @@ extern OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
 
 /* hashovfl.c */
 extern Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin);
-extern BlockNumber _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
+extern Buffer _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
 				   bool wbuf_dirty, BufferAccessStrategy bstrategy);
 extern void _hash_initbitmap(Relation rel, HashMetaPage metap,
 				 BlockNumber blkno, ForkNumber forkNum);
