diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 2e668f3c1a..575eb91e6e 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -39,6 +39,7 @@ typedef struct
 	double		indtuples;		/* # tuples accepted into index */
 	Relation	heapRel;		/* heap relation descriptor */
 	int			NumIndexKeyAttrs; /* number of keys in index */
+	IndexUniqueCheck checkUnique; /* type of uniqueness check */
 } HashBuildState;
 
 static void hashbuildCallback(Relation index,
@@ -64,7 +65,7 @@ hashhandler(PG_FUNCTION_ARGS)
 	amroutine->amcanorder = false;
 	amroutine->amcanorderbyop = false;
 	amroutine->amcanbackward = true;
-	amroutine->amcanunique = false;
+	amroutine->amcanunique = true;
 	amroutine->amcanmulticol = true;
 	amroutine->amoptionalkey = false;
 	amroutine->amsearcharray = false;
@@ -166,6 +167,7 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	buildstate.indtuples = 0;
 	buildstate.heapRel = heap;
 	buildstate.NumIndexKeyAttrs = indexInfo->ii_NumIndexKeyAttrs;
+	buildstate.checkUnique = (indexInfo->ii_Unique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO);
 
 	/* do the heap scan */
 	reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
@@ -177,7 +179,10 @@ hashbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	if (buildstate.spool)
 	{
 		/* sort the tuples and insert them into the index */
-		_h_indexbuild(buildstate.spool, buildstate.heapRel);
+		_h_indexbuild(buildstate.spool,
+						buildstate.heapRel,
+						buildstate.checkUnique,
+						buildstate.NumIndexKeyAttrs);
 		_h_spooldestroy(buildstate.spool);
 	}
 
@@ -232,7 +237,12 @@ hashbuildCallback(Relation index,
 		itup = index_form_tuple(RelationGetDescr(index),
 								index_values, index_isnull);
 		itup->t_tid = *tid;
-		_hash_doinsert(index, itup, buildstate->heapRel);
+		(void) _hash_doinsert(index,
+								itup,
+								buildstate->heapRel,
+								buildstate->checkUnique,
+								buildstate->NumIndexKeyAttrs,
+								false);
 		pfree(itup);
 	}
 
@@ -255,6 +265,7 @@ hashinsert(Relation rel, Datum *values, bool *isnull,
 	Datum		index_values[1];
 	bool		index_isnull[1];
 	IndexTuple	itup;
+	bool		result;
 
 	/* convert data to a hash key; on failure, do not insert anything */
 	if (!_hash_convert_tuple(rel, indexInfo->ii_NumIndexKeyAttrs,
@@ -266,11 +277,16 @@ hashinsert(Relation rel, Datum *values, bool *isnull,
 	itup = index_form_tuple(RelationGetDescr(rel), index_values, index_isnull);
 	itup->t_tid = *ht_ctid;
 
-	_hash_doinsert(rel, itup, heapRel);
+	result = _hash_doinsert(rel,
+							itup,
+							heapRel,
+							checkUnique,
+							indexInfo->ii_NumIndexKeyAttrs,
+							indexUnchanged);
 
 	pfree(itup);
 
-	return false;
+	return result;
 }
 
 
@@ -376,6 +392,7 @@ hashbeginscan(Relation rel, int nkeys, int norderbys)
 	HashScanPosInvalidate(so->currPos);
 	so->hashso_bucket_buf = InvalidBuffer;
 	so->hashso_split_bucket_buf = InvalidBuffer;
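+	/* _hash_doinsert switches this to HASH_WRITE for uniqueness checks */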
+	so->hashso_access = HASH_READ;
 
 	so->hashso_buc_populated = false;
 	so->hashso_buc_split = false;
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index d254a00b6a..721ef4e862 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -17,23 +17,37 @@
 
 #include "access/hash.h"
 #include "access/hash_xlog.h"
+#include "access/tableam.h"
 #include "miscadmin.h"
 #include "storage/buf_internals.h"
+#include "storage/lmgr.h"
 #include "storage/lwlock.h"
 #include "storage/predicate.h"
 #include "utils/rel.h"
 
 static void _hash_vacuum_one_page(Relation rel, Relation hrel,
 								  Buffer metabuf, Buffer buf);
+static TransactionId _hash_check_unique(IndexScanDesc scan, IndexTuple itup, Relation heapRel,
+				 IndexUniqueCheck checkUnique, bool *is_unique, uint32 *speculativeToken);
+static TransactionId _hash_resolve_possible_duplicate(IndexTuple itup, ItemPointer pdtid, bool *is_unique);
 
 /*
  *	_hash_doinsert() -- Handle insertion of a single index tuple.
  *
  *		This routine is called by the public interface routines, hashbuild
  *		and hashinsert.  By here, itup is completely filled in.
+ *
+ *		If checkUnique != UNIQUE_CHECK_NO then we need additional information
+ *		for uniqueness checking, though the check can be skipped when
+ *		indexUnchanged is set, an executor hint that an UPDATE did not
+ *		change the indexed columns.  Since we store only the hash value,
+ *		not the full column values, we must check every heap tuple whose
+ *		hashkey matches, even though many of those may be false positives
+ *		from hash collisions.  To compare the heap tuples we also need to
+ *		know the number of index key attributes and their AttrNums.
  */
-void
-_hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
+bool
+_hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel,
+				IndexUniqueCheck checkUnique, int num_index_attrs, bool indexUnchanged)
 {
 	Buffer		buf = InvalidBuffer;
 	Buffer		bucket_buf;
@@ -48,6 +62,9 @@ _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel)
 	uint32		hashkey;
 	Bucket		bucket;
 	OffsetNumber itup_off;
+	bool        checkingunique = (checkUnique != UNIQUE_CHECK_NO && !indexUnchanged);
+	bool		is_unique = checkingunique;
+	uint32		speculativeToken;
 
 	/*
 	 * Get the hash key for the item (it's stored in the index tuple itself).
@@ -66,22 +83,11 @@ restart_insert:
 	 * examine pd_pagesize_version, but that can't change so we can examine it
 	 * without a lock.
 	 */
-	metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
-	metapage = BufferGetPage(metabuf);
-
-	/*
-	 * Check whether the item can fit on a hash page at all. (Eventually, we
-	 * ought to try to apply TOAST methods if not.)  Note that at this point,
-	 * itemsz doesn't include the ItemId.
-	 *
-	 * XXX this is useless code if we are only storing hash keys.
-	 */
-	if (itemsz > HashMaxItemSize(metapage))
-		ereport(ERROR,
-				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-				 errmsg("index row size %zu exceeds hash maximum %zu",
-						itemsz, HashMaxItemSize(metapage)),
-				 errhint("Values larger than a buffer page cannot be indexed.")));
+	if (checkUnique != UNIQUE_CHECK_EXISTING)
+	{
+		metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_NOLOCK, LH_META_PAGE);
+		metapage = BufferGetPage(metabuf);
+	}
 
 	/* Lock the primary bucket page for the target bucket. */
 	buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_WRITE,
@@ -106,7 +112,8 @@ restart_insert:
 	 * not if we're trying to insert into the bucket into which tuples are
 	 * being moved (the "new" bucket).
 	 */
-	if (H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf))
+	if (checkUnique != UNIQUE_CHECK_EXISTING &&
+		H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf))
 	{
 		/* release the lock on bucket buffer, before completing the split. */
 		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
@@ -122,6 +129,90 @@ restart_insert:
 		goto restart_insert;
 	}
 
+	/*
+	 * checkingunique inserts are not allowed to go ahead when two tuples with
+	 * equal key attribute values would be visible to new MVCC snapshots once
+	 * the xact commits.  Check for conflicts here.
+	 *
+	 * NOTE: obviously, _hash_check_unique can only detect keys that are already
+	 * in the index; so it cannot defend against concurrent insertions of the
+	 * same key.  We protect against that by means of holding a write lock on
+	 * the bucket page.  Any other would-be inserter of the same key must
+	 * acquire a write lock on the same page, so only one would-be inserter
+	 * can be making the check at one time.  Furthermore, once we are past
+	 * the check we hold write locks continuously until we have performed
+	 * our insertion, so no later inserter can fail to see our insertion.
+	 * During the scan itself we take care to start from the right place
+	 * when a split is in progress.
+	 *
+	 * If we must wait for another xact, we release the lock and pin while
+	 * waiting, and then must perform a new search.
+	 *
+	 * For a partial uniqueness check, we don't wait for the other xact. Just
+	 * let the tuple in and return false for possibly non-unique, or true for
+	 * definitely unique.
+	 */
+	if (checkingunique)
+	{
+		IndexScanDesc scan;
+		HashScanOpaque so;
+		TransactionId xwait;
+
+		/*
+		 * Start an index scan to search for possible duplicates, marked
+		 * with HASH_WRITE access so that hashsearch.c knows to hold the
+		 * exclusive lock on the bucket buffer for the entire scan.  Note
+		 * that we still scan non-bucket pages with HASH_READ.
+		 */
+		scan = hashbeginscan(rel, num_index_attrs, 0);
+		so = (HashScanOpaque) scan->opaque;
+		so->hashso_bucket_buf = buf;
+		so->hashso_sk_hash = hashkey;
+		so->hashso_access = HASH_WRITE;
+
+		/*
+		 * Check that there are no duplicates.  A committed conflict
+		 * raises an ERROR; an in-progress one gives us an xid to wait on.
+		 */
+		xwait = _hash_check_unique(scan, itup, heapRel, checkUnique,
+									 &is_unique, &speculativeToken);
+		pfree(scan->opaque);
+		pfree(scan);
+
+		if (unlikely(TransactionIdIsValid(xwait)))
+		{
+			/* Have to wait for the other guy ... */
+			elog(NOTICE, "unique check: must wait for xact %u", xwait);
+
+			/* release the pins on the bucket and (if fetched) meta buffer */
+			_hash_dropbuf(rel, buf);
+			if (checkUnique != UNIQUE_CHECK_EXISTING)
+				_hash_dropbuf(rel, metabuf);
+
+			/*
+			 * If it's a speculative insertion, wait for it to finish (ie. to
+			 * go ahead with the insertion, or kill the tuple).  Otherwise
+			 * wait for the transaction to finish as usual.
+			 */
+			if (speculativeToken)
+				SpeculativeInsertionWait(xwait, speculativeToken);
+			else
+				XactLockTableWait(xwait, rel, &itup->t_tid, XLTW_InsertIndex);
+
+			/* start over... */
+			goto restart_insert;
+		}
+
+		/*
+		 * We have now confirmed that there are no duplicates.
+		 * If this was a recheck, our job is done.
+		 */
+		if (checkUnique == UNIQUE_CHECK_EXISTING)
+		{
+			_hash_dropbuf(rel, buf);
+			return true;
+		}
+	}
+
 	/* Do the insertion */
 	while (PageGetFreeSpace(page) < itemsz)
 	{
@@ -251,6 +342,8 @@ restart_insert:
 
 	/* Finally drop our pin on the metapage */
 	_hash_dropbuf(rel, metabuf);
+
+	return is_unique;
 }
 
 /*
@@ -430,3 +523,98 @@ _hash_vacuum_one_page(Relation rel, Relation hrel, Buffer metabuf, Buffer buf)
 		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
 	}
 }
+
+/*
+ *	_hash_check_unique() -- Check for violation of the unique index
+ *                          constraint by the tuple being inserted.
+ *
+ * Returns InvalidTransactionId if there is no conflict, else an xact ID
+ * we must wait for to see if it commits a conflicting tuple.  If an actual
+ * conflict is detected, no return --- just ereport().  If an xact ID is
+ * returned, and the conflicting tuple still has a speculative insertion in
+ * progress, *speculativeToken is set to non-zero, and the caller can wait for
+ * the verdict on the insertion using SpeculativeInsertionWait().
+ *
+ * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return
+ * InvalidTransactionId because we don't want to wait.  In this case we
+ * set *is_unique to false if there is a potential conflict, and the
+ * core code must redo the uniqueness check later.
+ *
+ * Do not call here when there are NULL values in scan key.  NULL should be
+ * considered unequal to NULL when checking for duplicates, but we are not
+ * prepared to handle that correctly.
+ *
+ * Bucket page is locked and pinned on entry and remains so until exit,
+ * except when we need to swap the locks because of an in-progress split.
+ */
+static TransactionId
+_hash_check_unique(IndexScanDesc scan, IndexTuple itup, Relation heapRel,
+				 IndexUniqueCheck checkUnique, bool *is_unique, uint32 *speculativeToken)
+{
+	HashScanOpaque	so = (HashScanOpaque) scan->opaque;
+	bool			res;
+	HashScanPosItem *currItem;
+	TransactionId xwait = InvalidTransactionId;
+
+	/* no speculative insertion seen yet */
+	*speculativeToken = 0;
+
+	res = _hash_first(scan, ForwardScanDirection);
+
+	if (res && checkUnique == UNIQUE_CHECK_PARTIAL)
+	{
+		/*
+		 * Possible duplicate exists, but we can check that later,
+		 * A possible duplicate exists, but the core code will recheck
+		 * later, so we need not decide here whether it is a false positive.
+		*is_unique = false;
+		return InvalidTransactionId;
+	}
+	else
+		*is_unique = true;
+
+	if (res)
+		elog(NOTICE, "possible duplicate found with hashkey %u", so->hashso_sk_hash);
+
+	while (res)
+	{
+		currItem = &so->currPos.items[so->currPos.itemIndex];
+
+		xwait = _hash_resolve_possible_duplicate(itup, &(currItem->heapTid), is_unique);
+
+		if (TransactionIdIsValid(xwait))
+			break;
+
+		res = _hash_next(scan, ForwardScanDirection);
+	}
+
+	return xwait;
+}
+
+/*
+ *   _hash_resolve_possible_duplicate
+ *
+ * A hash index doesn't store column values, so all we have is two heap
+ * TIDs; we must fetch both heap tuples to see what their column values
+ * are and compare them using the appropriate operators.
+ */
+static TransactionId
+_hash_resolve_possible_duplicate(IndexTuple itup, ItemPointer pdtid, bool *is_unique)
+{
+	SnapshotData SnapshotDirty;
+
+	InitDirtySnapshot(SnapshotDirty);
+
+	elog(NOTICE, "possible duplicate (%u,%u)",
+						ItemPointerGetBlockNumber(pdtid),
+						ItemPointerGetOffsetNumber(pdtid));
+
+	/*
+	 * Fetch the heap tuple at pdtid using SnapshotDirty.
+	 *
+	 * If it is live or being inserted, fetch the tuple at itup->t_tid as
+	 * well, then compare the indexed column values of the two tuples
+	 * using the appropriate equality operators.
+	 */
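+
+	/*
+	 * A possible shape for that fetch, sketched here in outline only.  It
+	 * is untested, it assumes heapRel and the caller's speculativeToken
+	 * pointer get passed down to this function, and it elides the actual
+	 * attribute comparison and the slot/fetch cleanup.  Fetching through
+	 * the tableam with a dirty snapshot reports an in-progress insertion
+	 * by recording the inserter's xid for the caller to wait on:
+	 *
+	 *		IndexFetchTableData *fetch = table_index_fetch_begin(heapRel);
+	 *		TupleTableSlot *slot = table_slot_create(heapRel, NULL);
+	 *		bool		call_again = false;
+	 *		bool		all_dead = false;
+	 *
+	 *		if (table_index_fetch_tuple(fetch, pdtid, &SnapshotDirty, slot,
+	 *									&call_again, &all_dead) &&
+	 *			TransactionIdIsValid(SnapshotDirty.xmin))
+	 *		{
+	 *			*speculativeToken = SnapshotDirty.speculativeToken;
+	 *			return SnapshotDirty.xmin;
+	 *		}
+	 */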
+
+	/* Act as if there were no duplicate, for now */
+	*is_unique = true;
+	return InvalidTransactionId;
+}
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index c5c2382b36..58027bed4d 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -287,23 +287,36 @@ _hash_dropbuf(Relation rel, Buffer buf)
 void
 _hash_dropscanbuf(Relation rel, HashScanOpaque so)
 {
-	/* release pin we hold on primary bucket page */
-	if (BufferIsValid(so->hashso_bucket_buf) &&
-		so->hashso_bucket_buf != so->currPos.buf)
-		_hash_dropbuf(rel, so->hashso_bucket_buf);
-	so->hashso_bucket_buf = InvalidBuffer;
+	if (so->hashso_access == HASH_READ)
+	{
+		/* release pin we hold on primary bucket page */
+		if (BufferIsValid(so->hashso_bucket_buf) &&
+			so->hashso_bucket_buf != so->currPos.buf)
+			_hash_dropbuf(rel, so->hashso_bucket_buf);
+		so->hashso_bucket_buf = InvalidBuffer;
+
+		/* release any pin we still hold */
+		if (BufferIsValid(so->currPos.buf))
+			_hash_dropbuf(rel, so->currPos.buf);
+		so->currPos.buf = InvalidBuffer;
+	}
+	else
+	{
+		Assert(so->hashso_access == HASH_WRITE);
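+
+		/*
+		 * Note that we do not drop hashso_bucket_buf here: for a
+		 * uniqueness-check scan it is owned by _hash_doinsert, which
+		 * releases it itself.
+		 */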
 
-	/* release pin we hold on primary bucket page  of bucket being split */
+		/* release any pin we still hold */
+		if (BufferIsValid(so->currPos.buf) &&
+			so->hashso_bucket_buf != so->currPos.buf)
+			_hash_dropbuf(rel, so->currPos.buf);
+		so->currPos.buf = InvalidBuffer;
+	}
+
+	/* release pin we hold on primary bucket page of bucket being split */
 	if (BufferIsValid(so->hashso_split_bucket_buf) &&
 		so->hashso_split_bucket_buf != so->currPos.buf)
 		_hash_dropbuf(rel, so->hashso_split_bucket_buf);
 	so->hashso_split_bucket_buf = InvalidBuffer;
 
-	/* release any pin we still hold */
-	if (BufferIsValid(so->currPos.buf))
-		_hash_dropbuf(rel, so->currPos.buf);
-	so->currPos.buf = InvalidBuffer;
-
 	/* reset split scan */
 	so->hashso_buc_populated = false;
 	so->hashso_buc_split = false;
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 021a80ae63..303b9a9e74 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -6,6 +6,7 @@
  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
+ * Read scans use HASH_READ; uniqueness checks for index inserts use HASH_WRITE.
  *
  * IDENTIFICATION
  *	  src/backend/access/hash/hashsearch.c
@@ -144,14 +145,26 @@ _hash_readnext(IndexScanDesc scan,
 	 * Retain the pin on primary bucket page till the end of scan.  Refer the
 	 * comments in _hash_first to know the reason of retaining pin.
 	 */
-	if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
-		LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
+	if (so->hashso_access == HASH_READ)
+	{
+		if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
+			LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
+		else
+			_hash_relbuf(rel, *bufp);
+
+		/* check for interrupts while we're not holding any buffer lock */
+		CHECK_FOR_INTERRUPTS();
+	}
 	else
-		_hash_relbuf(rel, *bufp);
+	{
+		Assert(so->hashso_access == HASH_WRITE);
+
+		if (*bufp != so->hashso_bucket_buf)
+			_hash_relbuf(rel, *bufp);
+	}
 
 	*bufp = InvalidBuffer;
-	/* check for interrupts while we're not holding any buffer lock */
-	CHECK_FOR_INTERRUPTS();
+
 	if (BlockNumberIsValid(blkno))
 	{
 		*bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
@@ -214,6 +227,7 @@ _hash_readprev(IndexScanDesc scan,
 	if (*bufp == so->hashso_bucket_buf || *bufp == so->hashso_split_bucket_buf)
 	{
 		LockBuffer(*bufp, BUFFER_LOCK_UNLOCK);
+		Assert(so->hashso_access == HASH_READ);
 		haveprevblk = false;
 	}
 	else
@@ -294,67 +308,78 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 	Relation	rel = scan->indexRelation;
 	HashScanOpaque so = (HashScanOpaque) scan->opaque;
 	ScanKey		cur;
-	uint32		hashkey;
 	Bucket		bucket;
 	Buffer		buf;
 	Page		page;
 	HashPageOpaque opaque;
 	HashScanPosItem *currItem;
 
-	pgstat_count_index_scan(rel);
+	if (so->hashso_access == HASH_READ)
+	{
+		uint32		hashkey;
 
-	/*
-	 * We do not support hash scans with no index qualification, because we
-	 * would have to read the whole index rather than just one bucket. That
-	 * creates a whole raft of problems, since we haven't got a practical way
-	 * to lock all the buckets against splits or compactions.
-	 */
-	if (scan->numberOfKeys < 1)
-		ereport(ERROR,
-				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("hash indexes do not support whole-index scans")));
+		Assert(so->hashso_bucket_buf == InvalidBuffer);
 
-	/* There may be more than one index qual, but we hash only the first */
-	cur = &scan->keyData[0];
+		pgstat_count_index_scan(rel);
 
-	/* And there's only one operator strategy, too */
-	Assert(cur->sk_strategy == HTEqualStrategyNumber);
+		/*
+		 * We do not support hash scans with no index qualification, because we
+		 * would have to read the whole index rather than just one bucket. That
+		 * creates a whole raft of problems, since we haven't got a practical way
+		 * to lock all the buckets against splits or compactions.
+		 */
+		if (scan->numberOfKeys < 1)
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("hash indexes do not support whole-index scans")));
 
-	/*
-	 * If the constant in the index qual is NULL, assume it cannot match any
-	 * items in the index.
-	 */
-	if (cur->sk_flags & SK_ISNULL)
-		return false;
+		/* There may be more than one index qual, but we hash only the first */
+		cur = &scan->keyData[0];
 
-	/*
-	 * Okay to compute the hash key.  We want to do this before acquiring any
-	 * locks, in case a user-defined hash function happens to be slow.
-	 *
-	 * If scankey operator is not a cross-type comparison, we can use the
-	 * cached hash function; otherwise gotta look it up in the catalogs.
-	 *
-	 * We support the convention that sk_subtype == InvalidOid means the
-	 * opclass input type; this is a hack to simplify life for ScanKeyInit().
-	 */
-	if (cur->sk_subtype == rel->rd_opcintype[0] ||
-		cur->sk_subtype == InvalidOid)
-		hashkey = _hash_datum2hashkey(rel, cur->sk_argument);
-	else
-		hashkey = _hash_datum2hashkey_type(rel, cur->sk_argument,
-										   cur->sk_subtype);
+		/* And there's only one operator strategy, too */
+		Assert(cur->sk_strategy == HTEqualStrategyNumber);
 
-	so->hashso_sk_hash = hashkey;
+		/*
+		 * If the constant in the index qual is NULL, assume it cannot match any
+		 * items in the index.
+		 */
+		if (cur->sk_flags & SK_ISNULL)
+			return false;
+
+		/*
+		 * Okay to compute the hash key.  We want to do this before acquiring any
+		 * locks, in case a user-defined hash function happens to be slow.
+		 *
+		 * If scankey operator is not a cross-type comparison, we can use the
+		 * cached hash function; otherwise gotta look it up in the catalogs.
+		 *
+		 * We support the convention that sk_subtype == InvalidOid means the
+		 * opclass input type; this is a hack to simplify life for ScanKeyInit().
+		 */
+		if (cur->sk_subtype == rel->rd_opcintype[0] ||
+			cur->sk_subtype == InvalidOid)
+			hashkey = _hash_datum2hashkey(rel, cur->sk_argument);
+		else
+			hashkey = _hash_datum2hashkey_type(rel, cur->sk_argument,
+											   cur->sk_subtype);
+
+		so->hashso_sk_hash = hashkey;
+
+		buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, so->hashso_access, NULL);
+		PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
+		so->hashso_bucket_buf = buf;
+	}
+	else
+	{
+		Assert(so->hashso_access == HASH_WRITE);
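+
+		/*
+		 * Uniqueness check: _hash_doinsert has already pinned and
+		 * exclusive-locked the bucket buffer and set hashso_sk_hash.
+		 */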
+		buf = so->hashso_bucket_buf;
+	}
 
-	buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ, NULL);
-	PredicateLockPage(rel, BufferGetBlockNumber(buf), scan->xs_snapshot);
 	page = BufferGetPage(buf);
 	TestForOldSnapshot(scan->xs_snapshot, rel, page);
 	opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 	bucket = opaque->hasho_bucket;
 
-	so->hashso_bucket_buf = buf;
-
 	/*
 	 * If a bucket split is in progress, then while scanning the bucket being
 	 * populated, we need to skip tuples that were copied from bucket being
@@ -394,7 +419,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
 		so->hashso_split_bucket_buf = old_buf;
 		LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);
 
-		LockBuffer(buf, BUFFER_LOCK_SHARE);
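+		/* HASH_READ and HASH_WRITE double as the buffer lock modes */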
+		LockBuffer(buf, so->hashso_access);
 		page = BufferGetPage(buf);
 		opaque = (HashPageOpaque) PageGetSpecialPointer(page);
 		Assert(opaque->hasho_bucket == bucket);
@@ -584,7 +609,11 @@ _hash_readpage(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
 	{
 		so->currPos.prevPage = InvalidBlockNumber;
 		so->currPos.nextPage = opaque->hasho_nextblkno;
-		LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
+
+		/* Keep the bucket lock if we are writing */
+		if (so->currPos.buf == so->hashso_split_bucket_buf ||
+			so->hashso_access == HASH_READ)
+			LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
 	}
 	else
 	{
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 3ce42483ed..65bf285aa5 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -116,7 +116,8 @@ _h_spool(HSpool *hspool, ItemPointer self, Datum *values, bool *isnull)
  * create an entire index.
  */
 void
-_h_indexbuild(HSpool *hspool, Relation heapRel)
+_h_indexbuild(HSpool *hspool, Relation heapRel,
+				IndexUniqueCheck checkUnique, int NumIndexKeyAttrs)
 {
 	IndexTuple	itup;
 	int64		tups_done = 0;
@@ -144,7 +145,12 @@ _h_indexbuild(HSpool *hspool, Relation heapRel)
 		Assert(hashkey >= lasthashkey);
 #endif
 
-		_hash_doinsert(hspool->index, itup, heapRel);
+		_hash_doinsert(hspool->index,
+						itup,
+						heapRel,
+						checkUnique,
+						NumIndexKeyAttrs,
+						false);
 
 		pgstat_progress_update_param(PROGRESS_CREATEIDX_TUPLES_DONE,
 									 ++tups_done);
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 57c346fe28..60f4381e81 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -161,6 +161,9 @@ typedef struct HashScanOpaqueData
 	/* remember the buffer associated with primary bucket */
 	Buffer		hashso_bucket_buf;
 
+	/* access type: HASH_READ for plain scans, HASH_WRITE for unique checks */
+	int			hashso_access;
+
 	/*
 	 * remember the buffer associated with primary bucket page of bucket being
 	 * split.  it is required during the scan of the bucket which is being
@@ -388,7 +391,8 @@ extern void hashadjustmembers(Oid opfamilyoid,
 /* private routines */
 
 /* hashinsert.c */
-extern void _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel);
+extern bool _hash_doinsert(Relation rel, IndexTuple itup, Relation heapRel,
+						   IndexUniqueCheck checkUnique, int num_index_attrs, bool indexUnchanged);
 extern OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
 								   Size itemsize, IndexTuple itup);
 extern void _hash_pgaddmultitup(Relation rel, Buffer buf, IndexTuple *itups,
@@ -448,7 +452,8 @@ extern HSpool *_h_spoolinit(Relation heap, Relation index, uint32 num_buckets);
 extern void _h_spooldestroy(HSpool *hspool);
 extern void _h_spool(HSpool *hspool, ItemPointer self,
 					 Datum *values, bool *isnull);
-extern void _h_indexbuild(HSpool *hspool, Relation heapRel);
+extern void _h_indexbuild(HSpool *hspool, Relation heapRel,
+							IndexUniqueCheck checkUnique, int num_index_attrs);
 
 /* hashutil.c */
 extern bool _hash_checkqual(IndexScanDesc scan, IndexTuple itup);
diff --git a/src/test/regress/expected/amutils.out b/src/test/regress/expected/amutils.out
index 226aed4afd..b976190a82 100644
--- a/src/test/regress/expected/amutils.out
+++ b/src/test/regress/expected/amutils.out
@@ -162,7 +162,7 @@ select amname, prop, pg_indexam_has_property(a.oid, prop) as p
  gist   | can_include   | t
  gist   | bogus         | 
  hash   | can_order     | f
- hash   | can_unique    | f
+ hash   | can_unique    | t
  hash   | can_multi_col | t
  hash   | can_exclude   | t
  hash   | can_include   | f
