Here is a patch to create unique GiST indexes. It is not really
finished yet, but I thought I would share it. It is intended to address
the bug discussed at [1], where calling pg_get_indexdef on a WITHOUT
OVERLAPS constraint generates SQL that Postgres refuses to run. I
realized after sending that email that the thread is on pgsql-bugs,
but I think it belongs on pgsql-hackers now (for one thing, so I can
create a commitfest entry). Please see my notes at that link, and
apologies for the extra noise.

[1] 
https://www.postgresql.org/message-id/CA%2BrenyU4jKCxrtASJpssZmfrkWhi-%2BQ_PF__jxt8E23T755SPQ%40mail.gmail.com

Yours,

-- 
Paul              ~{:-)
[email protected]
From cdb24a19d52390b8a3366ca6ba2554215c83c9e7 Mon Sep 17 00:00:00 2001
From: "Paul A. Jungwirth" <[email protected]>
Date: Tue, 4 Nov 2025 19:05:42 -0800
Subject: [PATCH v1] Allow unique GiST indexes

- Added tests showing you can CREATE UNIQUE INDEX ... USING gist, with correct
  semantics both when building the index and when inserting into it (see the
  SQL sketch below).
- Updated the gist README.
- Updated documentation.
- Added excludeFn to the GISTSTATE, used to compare values.
- Added gist_check_unique to do the work. We call it once we have found the
  leaf page, and it checks every tuple on that page. As an optimization we
  could check the consistent function first.
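
For reference, a minimal SQL sketch of what the new regression tests
exercise (gist_rngtbl and uq_gist_rngtbl are just the names used in
src/test/regress/sql/gist.sql):

    create table gist_rngtbl (id int4range);
    create unique index uq_gist_rngtbl on gist_rngtbl using gist (id);
    insert into gist_rngtbl values ('[1,2)'), ('[2,3)'); -- okay
    insert into gist_rngtbl values ('[1,2)');
    -- ERROR:  duplicate key value violates unique constraint "uq_gist_rngtbl"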

Because GiST is lossy (at least if the opclass provides a compress function),
the Datum in the index might not match the Datum in the heap. But the excludeFn
needs heap Datums. So we have to load them before checking. That is not really a
big performance hit though, because we need the heap tuples anyway for
visibility information.

If we detect that we are backing a WITHOUT OVERLAPS constraint (because
indisexclusion is set at the same time as indisunique), then we use the
overlaps operator for the last key column's excludeFn. We also adjust our
error messages to match what the exclusion constraint would produce. This
means that almost all the work in check_exclusion_or_unique_constraint is
redundant. Perhaps we should detect that case, so that WITHOUT OVERLAPS
constraints can rely on the index having done the check already? (The
constraint still needs to reject empty values, though.) In a couple of cases
I had to update tests for ADD CONSTRAINT ... WITHOUT OVERLAPS, because we now
encounter a different conflicting tuple.
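
For context, the affected constraints look like this one from the existing
without_overlaps regression tests (only the ordering of the two keys in the
error detail changes):

    ALTER TABLE temporal_mltrng
      ADD CONSTRAINT temporal_mltrng_pk
      PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);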

I haven't dealt with MVCC issues yet. When we find a not-yet-committed
conflicting tuple, we need to wait and re-check after that transaction has
finished. See nbtree for how this needs to work.

Also I'm trying to handle NULLS NOT DISTINCT correctly. In nbtree the code
assumes that nulls are never equal. So is that handled outside of the
individual index AMs?
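
To illustrate the NULLS NOT DISTINCT semantics in question, here is a plain
btree sketch (nothing this patch changes; the table is hypothetical):

    create table t (x int);
    create unique index on t (x) nulls not distinct;
    insert into t values (null); -- okay
    insert into t values (null); -- rejected: the two nulls count as equal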
---
 src/backend/access/gist/gist.c                | 359 +++++++++++++++++-
 src/backend/access/gist/gistbuild.c           |  33 +-
 src/backend/utils/sort/tuplesortvariants.c    |   6 +-
 src/include/access/gist_private.h             |   7 +-
 src/include/utils/tuplesort.h                 |   2 +
 src/test/regress/expected/amutils.out         |   4 +-
 src/test/regress/expected/gist.out            |  32 ++
 .../regress/expected/without_overlaps.out     |   4 +-
 src/test/regress/sql/gist.sql                 |  17 +
 9 files changed, 447 insertions(+), 17 deletions(-)

diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 4b943b7f43e..14abfc16b6a 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -16,14 +16,18 @@
 
 #include "access/gist_private.h"
 #include "access/gistscan.h"
+#include "access/tableam.h"
 #include "access/xloginsert.h"
+#include "catalog/pg_am.h"
 #include "catalog/pg_collation.h"
+#include "commands/defrem.h"
 #include "commands/vacuum.h"
 #include "miscadmin.h"
 #include "nodes/execnodes.h"
 #include "storage/predicate.h"
 #include "utils/fmgrprotos.h"
 #include "utils/index_selfuncs.h"
+#include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
@@ -69,7 +73,7 @@ gisthandler(PG_FUNCTION_ARGS)
 		.amconsistentequality = false,
 		.amconsistentordering = false,
 		.amcanbackward = false,
-		.amcanunique = false,
+		.amcanunique = true,
 		.amcanmulticol = true,
 		.amoptionalkey = true,
 		.amsearcharray = false,
@@ -161,6 +165,7 @@ gistbuildempty(Relation index)
  *
  *	  This is the public interface routine for tuple insertion in GiSTs.
  *	  It doesn't do any work; just locks the relation and passes the buck.
+ *	  TODO: I don't see where it locks the relation?
  */
 bool
 gistinsert(Relation r, Datum *values, bool *isnull,
@@ -172,12 +177,15 @@ gistinsert(Relation r, Datum *values, bool *isnull,
 	GISTSTATE  *giststate = (GISTSTATE *) indexInfo->ii_AmCache;
 	IndexTuple	itup;
 	MemoryContext oldCxt;
+	bool		known_unique;
 
 	/* Initialize GISTSTATE cache if first call in this statement */
 	if (giststate == NULL)
 	{
 		oldCxt = MemoryContextSwitchTo(indexInfo->ii_Context);
 		giststate = initGISTstate(r);
+		if (checkUnique != UNIQUE_CHECK_NO)
+			initGISTstateExclude(giststate, r);
 		giststate->tempCxt = createTempGistContext();
 		indexInfo->ii_AmCache = giststate;
 		MemoryContextSwitchTo(oldCxt);
@@ -188,13 +196,13 @@ gistinsert(Relation r, Datum *values, bool *isnull,
 	itup = gistFormTuple(giststate, r, values, isnull, true);
 	itup->t_tid = *ht_ctid;
 
-	gistdoinsert(r, itup, 0, giststate, heapRel, false);
+	known_unique = gistdoinsert(r, itup, checkUnique, values, isnull, 0, giststate, heapRel, false);
 
 	/* cleanup */
 	MemoryContextSwitchTo(oldCxt);
 	MemoryContextReset(giststate->tempCxt);
 
-	return false;
+	return known_unique;
 }
 
 
@@ -630,14 +638,232 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
 	return is_split;
 }
 
+/*
+ * gist_check_unique -- enforce UNIQUEness with given excludeFn.
+ *
+ * ereports if there is a definite conflict, or returns InvalidTransactionId if
+ * there is no conflict, or returns the TransactionId that must be waited on if
+ * there is a potential conflict from a not-yet-committed transaction.
+ *
+ * The state's stack must already point to the target leaf page.
+ *
+ * Our definition of uniqueness is based on the excludeFn, which does not
+ * necessarily check for equality. That lets you create so-called UNIQUE indexes
+ * that forbid something else, like overlaps. This is useful for indexes
+ * backing temporal constraints (i.e. WITHOUT OVERLAPS).
+ *
+ * Because the index Datums may be compressed (lossy), we fetch the original
+ * Datums from the heap before applying the excludeFn to them.
+ */
+static TransactionId
+gist_check_unique(Relation rel, GISTSTATE *giststate, GISTInsertState *state,
+				  Relation heapRel, IndexTuple itup, Datum *newvals, bool *newnulls,
+				  IndexUniqueCheck checkUnique, bool *is_unique)
+{
+	Page		page;
+	OffsetNumber maxoff;
+	OffsetNumber i;
+	MemoryContext oldcxt;
+	bool		conflicts = false;
+	SnapshotData SnapshotDirty;
+	IndexFetchTableData *scan;
+	TupleTableSlot *slot;
+	bool		call_again = false;
+	bool		found = false;
+	bool		all_dead = false; // TODO: use this? nbtree does.
+	Datum		oldvals[INDEX_MAX_KEYS];
+	bool		oldnulls[INDEX_MAX_KEYS];
+
+	InitDirtySnapshot(SnapshotDirty);
+
+	slot = table_slot_create(heapRel, NULL);
+
+	/* Check all tuples on the page for a conflict. */
+	page = state->stack->page;
+	maxoff = PageGetMaxOffsetNumber(page);
+	for (i = FirstOffsetNumber; i <= maxoff && !conflicts; i = OffsetNumberNext(i))
+	{
+		ItemId		iid = PageGetItemId(page, i);
+		IndexTuple	it;
+		// bool		match;
+		// bool		recheck;	// TODO
+		// bool		recheck_distances;
+		int			j;
+
+		/*
+		 * XXX: ordinary scans only skip dead items if passed
+		 * ignore_killed_tuples. Do we ever want to see killed tuples?
+		 */
+		if (ItemIdIsDead(iid))
+			continue;
+
+		it = (IndexTuple) PageGetItem(page, iid);
+
+		/*
+		 * Get the original Datums from the heap.
+		 * We have to look there anyway for visibility info.
+		 * We'll pass these Datums to the excludeFn.
+		 * If the opclass has no compression function,
+		 * we could use the Datums from the index tuple instead,
+		 * but as long as we're loading the heap record
+		 * we might as well use it.
+		 */
+
+		scan = table_index_fetch_begin(heapRel);
+		do {
+			// TODO: What if t_tid gets modified?
+			found |= table_index_fetch_tuple(scan, &it->t_tid, &SnapshotDirty, slot,
+											&call_again, &all_dead);
+			/*
+			 * XXX: Is looping on call_again actually needed here?
+			 * table_index_fetch_tuple_check ignores it.
+			 * But since we use the Datums, we do need the latest values.
+			 */
+		} while (call_again);
+		if (!found)
+		{
+			table_index_fetch_end(scan);
+			continue;
+		}
+		slot_getallattrs(slot);
+
+		conflicts = true;
+
+		oldcxt = MemoryContextSwitchTo(giststate->tempCxt);
+
+		/* Check all the key elements against excludeFn */
+		for (j = 0; j < giststate->leafTupdesc->natts; j++)
+		{
+			int		heapattr;
+			Datum	oldval;
+			Datum	newval;
+			bool	oldIsNull;
+			Datum	test;
+
+			Assert(OidIsValid(giststate->excludeFn[j].fn_oid));
+
+			// TODO: If it's an expression index with a compress function,
+			// we have to give up. Unless we want to re-evaluate the expression.
+			heapattr = rel->rd_index->indkey.values[j];
+
+			// XXX: I assume the index has a null iff the heap does?
+			// XXX: What about nulls_not_distinct? Btree seems to ignore that
+			// though in access/nbtree/nbtinsert.c:_bt_doinsert
+			oldIsNull = slot->tts_isnull[heapattr - 1];
+			if (oldIsNull)
+			{
+				conflicts = false;
+				break;
+			}
+			else
+			{
+				oldval = slot->tts_values[heapattr - 1];
+			}
+			/*
+			 * Record the heap Datums in index-attribute order,
+			 * so we can build an error message below.
+			 */
+			oldnulls[j] = oldIsNull;
+			oldvals[j] = oldval;
+
+			// XXX: I assume the index has a null iff the heap does?
+			// XXX: What about nulls_not_distinct? Btree seems to ignore that
+			// though in access/nbtree/nbtinsert.c:_bt_doinsert
+			if (newnulls[j])
+			{
+				// XXX: probably a higher level prevents even checking the index here?
+				conflicts = false;
+				break;
+			}
+			else
+			{
+				// TODO: detoast the newvals up front, only once?
+				newval = newvals[j];
+			}
+
+			// TODO: If we check nulls_not_distinct here,
+			// and both new and old are null,
+			// then treat it as a conflict.
+			// Somehow nbtree doesn't do that.
+
+			test = FunctionCall2Coll(&giststate->excludeFn[j],
+									 InvalidOid,	// TODO: collation
+									 oldval,
+									 newval);
+			/*
+			 * If all attributes conflict, then we violate uniqueness.
+			 * So we can stop at the first non-conflicting attribute.
+			 */
+			conflicts &= DatumGetBool(test);
+			if (!conflicts)
+				break;
+		}
+
+		table_index_fetch_end(scan);
+		MemoryContextSwitchTo(oldcxt);
+	}
+	*is_unique = !conflicts;
+
+	// TODO: depends on checkUnique
+	// TODO: deal with MVCC
+	if (conflicts) {
+		char *key_desc = BuildIndexValueDescription(rel, newvals, newnulls);
+
+		/* For WITHOUT OVERLAPS, match the exclusion constraint message */
+		if (rel->rd_index->indisexclusion)
+		{
+			char *old_key_desc = BuildIndexValueDescription(rel, oldvals, oldnulls);
+			if (state->is_build)
+				ereport(ERROR,
+						(errcode(ERRCODE_UNIQUE_VIOLATION),
+						 errmsg("could not create exclusion constraint \"%s\"",
+								RelationGetRelationName(rel)),
+						 key_desc && old_key_desc ? errdetail("Key %s conflicts with key %s.",
+											  key_desc, old_key_desc) : 0,
+						 errtableconstraint(heapRel,
+											RelationGetRelationName(rel))));
+			else
+				ereport(ERROR,
+						(errcode(ERRCODE_UNIQUE_VIOLATION),
+						 errmsg("conflicting key value violates exclusion constraint \"%s\"",
+								RelationGetRelationName(rel)),
+						 key_desc && old_key_desc ? errdetail("Key %s conflicts with existing key %s.",
+											  key_desc, old_key_desc) : 0,
+						 errtableconstraint(heapRel,
+											RelationGetRelationName(rel))));
+		}
+		else
+			ereport(ERROR,
+					(errcode(ERRCODE_UNIQUE_VIOLATION),
+					 errmsg("duplicate key value violates unique constraint \"%s\"",
+							RelationGetRelationName(rel)),
+					 key_desc ? errdetail("Key %s already exists.",
+										  key_desc) : 0,
+					 errtableconstraint(heapRel,
+										RelationGetRelationName(rel))));
+	}
+
+	ExecDropSingleTupleTableSlot(slot);
+
+	// TODO: xwait when it is not unique
+	return InvalidTransactionId;
+}
+
 /*
  * Workhorse routine for doing insertion into a GiST index. Note that
  * this routine assumes it is invoked in a short-lived memory context,
  * so it does not bother releasing palloc'd allocations.
+ *
+ * The result value is only significant for UNIQUE_CHECK_PARTIAL:
+ * it must be true if the entry is known unique, else false.
+ * (In the current implementation we'll also return true after a
+ * successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but
+ * that's just a coding artifact.)
  */
-void
-gistdoinsert(Relation r, IndexTuple itup, Size freespace,
-			 GISTSTATE *giststate, Relation heapRel, bool is_build)
+bool
+gistdoinsert(Relation r, IndexTuple itup, IndexUniqueCheck checkUnique,
+			 Datum *values, bool *isnull,
+			 Size freespace, GISTSTATE *giststate, Relation heapRel, bool is_build)
 {
 	ItemId		iid;
 	IndexTuple	idxtuple;
@@ -645,6 +871,10 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
 	GISTInsertStack *stack;
 	GISTInsertState state;
 	bool		xlocked = false;
+	bool		checkingunique = (checkUnique != UNIQUE_CHECK_NO);
+	bool		is_unique = false;
+
+search:
 
 	memset(&state, 0, sizeof(GISTInsertState));
 	state.freespace = freespace;
@@ -836,6 +1066,12 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
 			 * Leaf page. Insert the new key. We've already updated all the
 			 * parents on the way down, but we might have to split the page if
 			 * it doesn't fit. gistinserttuple() will take care of that.
+			 * TODO: If the parents are already updated,
+			 * isn't that a problem if the uniqueness check fails??
+			 * Do we need to descend twice and keep the locks longer for unique
+			 * indexes?
+			 * I guess if the new entry *is* non-unique, the parents didn't
+			 * actually change, right?
 			 */
 
 			/*
@@ -889,16 +1125,47 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace,
 
 			/* now state.stack->(page, buffer and blkno) points to leaf page */
 
-			gistinserttuple(&state, stack, giststate, itup,
-							InvalidOffsetNumber);
+			// TODO: now that we have the lock,
+			// we can check for uniqueness and raise an error if needed.
+			if (checkingunique)
+			{
+				TransactionId xwait;
+				// If there are null values, it can't be a duplicate
+				// TODO: but what about NULLS NOT DISTINCT? I don't see that in
+				// nbtree _bt_doinsert.
+				// For btree this seems to be handled in utils/sort/tuplesortvariants.c
+				// (which of course is outside of access/nbtree, but has btree
+				// in many function names).
+
+				xwait = gist_check_unique(r, giststate, &state, heapRel, itup,
+										  values, isnull,
+										  checkUnique, &is_unique);
+				if (unlikely(TransactionIdIsValid(xwait)))
+				{
+					// TODO: more here
+					goto search;
+				}
+			}
+
+			if (checkUnique != UNIQUE_CHECK_EXISTING)
+			{
+				// TODO: need SERIALIZABLE check like nbtree?
+
+				gistinserttuple(&state, stack, giststate, itup,
+								InvalidOffsetNumber);
+			}
+
 			LockBuffer(stack->buffer, GIST_UNLOCK);
 
 			/* Release any pins we might still hold before exiting */
 			for (; stack; stack = stack->parent)
 				ReleaseBuffer(stack->buffer);
+
 			break;
 		}
 	}
+
+	return is_unique;
 }
 
 /*
@@ -1623,6 +1890,15 @@ initGISTstate(Relation index)
 		else
 			giststate->fetchFn[i].fn_oid = InvalidOid;
 
+		/*
+		 * We only need excludeFn for UNIQUE indexes.
+		 * XXX: We could use this to enforce exclusion constraints
+		 * from the index instead of the constraint (which is how
+		 * unique constraints work at any rate). Or we could at least
+		 * enforce WITHOUT OVERLAPS semantics.
+		 */
+		giststate->excludeFn[i].fn_oid = InvalidOid;
+
 		/*
 		 * If the index column has a specified collation, we should honor that
 		 * while doing comparisons.  However, we may have a collatable storage
@@ -1652,6 +1928,7 @@ initGISTstate(Relation index)
 		giststate->equalFn[i].fn_oid = InvalidOid;
 		giststate->distanceFn[i].fn_oid = InvalidOid;
 		giststate->fetchFn[i].fn_oid = InvalidOid;
+		giststate->excludeFn[i].fn_oid = InvalidOid;
 		giststate->supportCollation[i] = InvalidOid;
 	}
 
@@ -1660,6 +1937,72 @@ initGISTstate(Relation index)
 	return giststate;
 }
 
+/*
+ * Initialize excludeFn (assumes this is a UNIQUE index).
+ */
+void
+initGISTstateExclude(GISTSTATE *giststate, Relation index)
+{
+	int			natts = IndexRelationGetNumberOfKeyAttributes(index);
+	int			i;
+
+	Assert(index->rd_index->indisunique);
+
+	for (i = 0; i < natts; i++)
+	{
+		/*
+		 * Set the operator (or rather its function) for the equality CompareType
+		 * (or fail).
+		 *
+		 * Even the GiST AM doesn't know the strategy numbers chosen by GiST
+		 * opfamilies. For example btree_gist uses BT*StrategyNumber even though
+		 * built-in opclasses uses RT*StrategyNumber. So we use the CompareType
+		 * built-in opclasses use RT*StrategyNumber. So we use the CompareType
+		 *
+		 * XXX: We assume that all opclasses within an opfamily use the same
+		 * strategy numbers. This is probably a fair assumption, but it
+		 * means we disregard the index attributes' opclasses when getting
+		 * stratnums.
+		 */
+		RegProcedure proc;
+		Oid opfamily = index->rd_opfamily[i];
+		CompareType cmptype;
+		StrategyNumber strat;
+		Oid				opid;
+
+		/*
+		 * If this is a WITHOUT OVERLAPS index, we use overlaps for the last
+		 * element. Otherwise we use equality. The only time both indisunique
+		 * and indisexclusion are set is for WITHOUT OVERLAPS.
+		 *
+		 * XXX: We could do checking for all exclusion constraints here,
+		 * if we wanted to look up their operators.
+		 */
+		if (index->rd_index->indisexclusion && i == natts - 1)
+			cmptype = COMPARE_OVERLAP;
+		else
+			cmptype = COMPARE_EQ;
+
+		strat = IndexAmTranslateCompareType(cmptype, GIST_AM_OID, opfamily, false);
+
+
+		opid = get_opfamily_member(index->rd_opfamily[i],
+								   index->rd_opcintype[i],
+								   index->rd_opcintype[i], strat);
+		if (!OidIsValid(opid))
+			ereport(ERROR,
+					errmsg("could not identify equality operator for unique constraint"),
+					errdetail("Could not translate compare type %d for operator family \"%s\" of access method \"%s\".",
+							  cmptype, get_opfamily_name(opfamily, false), get_am_name(GIST_AM_OID)));
+
+		proc = get_opcode(opid);
+		if (!OidIsValid(proc))
+			elog(ERROR, "cache lookup failed for operator %u", opid);
+
+		fmgr_info_cxt(proc, &(giststate->excludeFn[i]), giststate->scanCxt);
+	}
+}
+
 void
 freeGISTstate(GISTSTATE *giststate)
 {
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index b9fa196149d..9986af49c61 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -108,6 +108,9 @@ typedef struct
 	BlockNumber pages_allocated;
 
 	BulkWriteState *bulkstate;
+
+	bool		isunique;
+	bool		nulls_not_distinct;
 } GISTBuildState;
 
 #define GIST_SORTED_BUILD_PAGE_NUM 4
@@ -198,6 +201,10 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 	buildstate.heaprel = heap;
 	buildstate.sortstate = NULL;
 	buildstate.giststate = initGISTstate(index);
+	if (indexInfo->ii_Unique)
+		initGISTstateExclude(buildstate.giststate, index);
+	buildstate.isunique = indexInfo->ii_Unique;
+	buildstate.nulls_not_distinct = indexInfo->ii_NullsNotDistinct;
 
 	/*
 	 * Create a temporary memory context that is reset once for each tuple
@@ -266,6 +273,8 @@ gistbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 		 */
 		buildstate.sortstate = tuplesort_begin_index_gist(heap,
 														  index,
+														  buildstate.isunique,
+														  buildstate.nulls_not_distinct,
 														  maintenance_work_mem,
 														  NULL,
 														  TUPLESORT_NONE);
@@ -862,8 +871,28 @@ gistBuildCallback(Relation index,
 		 * There's no buffers (yet). Since we already have the index relation
 		 * locked, we call gistdoinsert directly.
 		 */
-		gistdoinsert(index, itup, buildstate->freespace,
-					 buildstate->giststate, buildstate->heaprel, true);
+		bool known_unique = gistdoinsert(index, itup,
+										 buildstate->isunique ? UNIQUE_CHECK_YES
+															  : UNIQUE_CHECK_NO,
+										 values, isnull,
+										 buildstate->freespace,
+										 buildstate->giststate,
+										 buildstate->heaprel, true);
+		/*
+		 * There are no other users of the index yet, so if we aren't sure it's
+		 * unique, there must be duplicates.
+		 * TODO: right??
+		 */
+		// TODO: say which keys are duplicated if possible (as in
+		// utils/sort/tuplesortvariants.c)
+		if (buildstate->isunique && !known_unique)
+			ereport(ERROR,
+					(errcode(ERRCODE_UNIQUE_VIOLATION),
+					 errmsg("could not create unique index \"%s\"",
+							RelationGetRelationName(buildstate->indexrel)),
+					 errdetail("Duplicate keys exist."),
+					 errtableconstraint(buildstate->heaprel,
+										RelationGetRelationName(buildstate->indexrel))));
 	}
 
 	MemoryContextSwitchTo(oldCtx);
diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c
index a1f5c19ee97..8d2629490bd 100644
--- a/src/backend/utils/sort/tuplesortvariants.c
+++ b/src/backend/utils/sort/tuplesortvariants.c
@@ -490,6 +490,8 @@ tuplesort_begin_index_hash(Relation heapRel,
 Tuplesortstate *
 tuplesort_begin_index_gist(Relation heapRel,
 						   Relation indexRel,
+						   bool enforceUnique,
+						   bool uniqueNullsNotDistinct,
 						   int workMem,
 						   SortCoordinate coordinate,
 						   int sortopt)
@@ -521,8 +523,8 @@ tuplesort_begin_index_gist(Relation heapRel,
 
 	arg->index.heapRel = heapRel;
 	arg->index.indexRel = indexRel;
-	arg->enforceUnique = false;
-	arg->uniqueNullsNotDistinct = false;
+	arg->enforceUnique = enforceUnique;
+	arg->uniqueNullsNotDistinct = uniqueNullsNotDistinct;
 
 	/* Prepare SortSupport data for each column */
 	base->sortKeys = (SortSupport) palloc0(base->nKeys *
diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h
index 39404ec7cdb..415d1d3b00a 100644
--- a/src/include/access/gist_private.h
+++ b/src/include/access/gist_private.h
@@ -92,6 +92,7 @@ typedef struct GISTSTATE
 	FmgrInfo	equalFn[INDEX_MAX_KEYS];
 	FmgrInfo	distanceFn[INDEX_MAX_KEYS];
 	FmgrInfo	fetchFn[INDEX_MAX_KEYS];
+	FmgrInfo	excludeFn[INDEX_MAX_KEYS];
 
 	/* Collations to pass to the support functions */
 	Oid			supportCollation[INDEX_MAX_KEYS];
@@ -407,9 +408,13 @@ extern bool gistinsert(Relation r, Datum *values, bool *isnull,
 					   struct IndexInfo *indexInfo);
 extern MemoryContext createTempGistContext(void);
 extern GISTSTATE *initGISTstate(Relation index);
+extern void initGISTstateExclude(GISTSTATE *giststate, Relation index);
 extern void freeGISTstate(GISTSTATE *giststate);
-extern void gistdoinsert(Relation r,
+extern bool gistdoinsert(Relation r,
 						 IndexTuple itup,
+						 IndexUniqueCheck checkUnique,
+						 Datum *values,
+						 bool *isnull,
 						 Size freespace,
 						 GISTSTATE *giststate,
 						 Relation heapRel,
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index 63a7cc13a31..b9f04f6dc50 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -440,6 +440,8 @@ extern Tuplesortstate *tuplesort_begin_index_hash(Relation heapRel,
 												  int sortopt);
 extern Tuplesortstate *tuplesort_begin_index_gist(Relation heapRel,
 												  Relation indexRel,
+												  bool enforceUnique,
+												  bool uniqueNullsNotDistinct,
 												  int workMem, SortCoordinate coordinate,
 												  int sortopt);
 extern Tuplesortstate *tuplesort_begin_index_brin(int workMem, SortCoordinate coordinate,
diff --git a/src/test/regress/expected/amutils.out b/src/test/regress/expected/amutils.out
index 7ab6113c619..9069e082ca5 100644
--- a/src/test/regress/expected/amutils.out
+++ b/src/test/regress/expected/amutils.out
@@ -72,7 +72,7 @@ select prop,
  bitmap_scan        |    | t     | 
  backward_scan      |    | f     | 
  can_order          | f  |       | 
- can_unique         | f  |       | 
+ can_unique         | t  |       | 
  can_multi_col      | t  |       | 
  can_exclude        | t  |       | 
  can_include        | t  |       | 
@@ -156,7 +156,7 @@ select amname, prop, pg_indexam_has_property(a.oid, prop) as p
  gin    | can_include   | f
  gin    | bogus         | 
  gist   | can_order     | f
- gist   | can_unique    | f
+ gist   | can_unique    | t
  gist   | can_multi_col | t
  gist   | can_exclude   | t
  gist   | can_include   | t
diff --git a/src/test/regress/expected/gist.out b/src/test/regress/expected/gist.out
index c75bbb23b6e..60e5eb7c407 100644
--- a/src/test/regress/expected/gist.out
+++ b/src/test/regress/expected/gist.out
@@ -390,6 +390,38 @@ ERROR:  lossy distance functions are not supported in index-only scans
 -- Force an index build using buffering.
 create index gist_tbl_box_index_forcing_buffering on gist_tbl using gist (p)
   with (buffering=on, fillfactor=50);
+-- create unique indexes
+create table gist_rngtbl (id int4range);
+insert into gist_rngtbl values ('[1,2)'), ('[2,3)'); -- okay
+create unique index uq_gist_rngtbl on gist_rngtbl using gist (id);
+insert into gist_rngtbl values ('[3,4)'), ('[4,5)'); -- okay
+\d gist_rngtbl
+             Table "public.gist_rngtbl"
+ Column |   Type    | Collation | Nullable | Default 
+--------+-----------+-----------+----------+---------
+ id     | int4range |           |          | 
+Indexes:
+    "uq_gist_rngtbl" UNIQUE, gist (id)
+
+select pg_get_indexdef('uq_gist_rngtbl'::regclass);
+                             pg_get_indexdef                              
+--------------------------------------------------------------------------
+ CREATE UNIQUE INDEX uq_gist_rngtbl ON public.gist_rngtbl USING gist (id)
+(1 row)
+
+-- enforced on build
+drop index uq_gist_rngtbl;
+insert into gist_rngtbl values ('[1,2)');
+create unique index uq_gist_rngtbl on gist_rngtbl using gist (id); -- fail
+ERROR:  could not create unique index "uq_gist_rngtbl"
+DETAIL:  Key (id)=([1,2)) is duplicated.
+truncate gist_rngtbl;
+-- enforced on insert
+create unique index uq_gist_rngtbl on gist_rngtbl using gist (id);
+insert into gist_rngtbl values ('[1,2)'), ('[2,3)'); -- okay
+insert into gist_rngtbl values ('[1,2)'); -- fail
+ERROR:  duplicate key value violates unique constraint "uq_gist_rngtbl"
+DETAIL:  Key (id)=([1,2)) already exists.
 -- Clean up
 reset enable_seqscan;
 reset enable_bitmapscan;
diff --git a/src/test/regress/expected/without_overlaps.out b/src/test/regress/expected/without_overlaps.out
index f3144bdc39c..ef44ec6b51e 100644
--- a/src/test/regress/expected/without_overlaps.out
+++ b/src/test/regress/expected/without_overlaps.out
@@ -641,7 +641,7 @@ BEGIN;
   INSERT INTO temporal_mltrng (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-01', '2018-01-05')));
   ALTER TABLE temporal_mltrng ADD CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);
 ERROR:  could not create exclusion constraint "temporal_mltrng_pk"
-DETAIL:  Key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}) conflicts with key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}).
+DETAIL:  Key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}) conflicts with key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}).
 ROLLBACK;
 -- rejects empty:
 BEGIN;
@@ -766,7 +766,7 @@ BEGIN;
   INSERT INTO temporal_mltrng3 (id, valid_at) VALUES ('[1,2)', datemultirange(daterange('2018-01-01', '2018-01-05')));
   ALTER TABLE temporal_mltrng3 ADD CONSTRAINT temporal_mltrng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS);
 ERROR:  could not create exclusion constraint "temporal_mltrng3_uq"
-DETAIL:  Key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}) conflicts with key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}).
+DETAIL:  Key (id, valid_at)=([1,2), {[2018-01-01,2018-01-05)}) conflicts with key (id, valid_at)=([1,2), {[2018-01-02,2018-02-03)}).
 ROLLBACK;
 -- rejects empty:
 BEGIN;
diff --git a/src/test/regress/sql/gist.sql b/src/test/regress/sql/gist.sql
index 6f1fc65f128..c83c6f3c741 100644
--- a/src/test/regress/sql/gist.sql
+++ b/src/test/regress/sql/gist.sql
@@ -173,6 +173,23 @@ select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1;
 create index gist_tbl_box_index_forcing_buffering on gist_tbl using gist (p)
   with (buffering=on, fillfactor=50);
 
+-- create unique indexes
+create table gist_rngtbl (id int4range);
+insert into gist_rngtbl values ('[1,2)'), ('[2,3)'); -- okay
+create unique index uq_gist_rngtbl on gist_rngtbl using gist (id);
+insert into gist_rngtbl values ('[3,4)'), ('[4,5)'); -- okay
+\d gist_rngtbl
+select pg_get_indexdef('uq_gist_rngtbl'::regclass);
+-- enforced on build
+drop index uq_gist_rngtbl;
+insert into gist_rngtbl values ('[1,2)');
+create unique index uq_gist_rngtbl on gist_rngtbl using gist (id); -- fail
+truncate gist_rngtbl;
+-- enforced on insert
+create unique index uq_gist_rngtbl on gist_rngtbl using gist (id);
+insert into gist_rngtbl values ('[1,2)'), ('[2,3)'); -- okay
+insert into gist_rngtbl values ('[1,2)'); -- fail
+
 -- Clean up
 reset enable_seqscan;
 reset enable_bitmapscan;
-- 
2.47.3
