Hi,

On 2020-06-09 17:04:42 -0400, Robert Haas wrote:
> On Tue, Jun 9, 2020 at 3:37 PM Andres Freund <and...@anarazel.de> wrote:
> > Hm. Looking at this again, perhaps the better fix would be to simply not
> > look at the concrete values of the barrier inside the signal handler?
> > E.g. we could have a new PROCSIG_GLOBAL_BARRIER, which just triggers
> > ProcSignalBarrierPending to be set. And then have
> > ProcessProcSignalBarrier do the check that's currently in
> > CheckProcSignalBarrier()?
> 
> That seems like a good idea.

What do you think about 0002?
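
Roughly, 0002 boils down to the following (a simplified sketch only; the
real code is in the attached patch): the SIGUSR1 handler just sets flags,
and the generation comparison happens later, outside the handler.

/* signal handler side: only set flags, don't touch (possibly emulated) atomics */
static void
HandleProcSignalBarrierInterrupt(void)
{
	InterruptPending = true;
	ProcSignalBarrierPending = true;
}

/* runs via ProcessInterrupts() / CHECK_FOR_INTERRUPTS(), outside the handler */
void
ProcessProcSignalBarrier(void)
{
	if (!ProcSignalBarrierPending)
		return;
	ProcSignalBarrierPending = false;

	/* reading the (potentially spinlock-backed) 64bit atomics is safe here */
	if (pg_atomic_read_u64(&MyProcSignalSlot->pss_barrierGeneration) ==
		pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration))
		return;					/* already caught up, nothing to do */

	/* ... absorb the pending barriers and advance pss_barrierGeneration ... */
}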


With regard to the cost of the expensive test in 0003, I'm somewhat
inclined to add it to the buildfarm for a few days and see how it
actually affects the few bf animals without atomics. We can rip it out
after we've gotten some additional coverage (or leave it in if it turns
out to be cheap enough in comparison).


> Also, I wonder if someone would be willing to set up a BF animal for this.

FWIW, I requested a buildfarm animal id for this a few days ago, but
haven't received a response yet...

Greetings,

Andres Freund
>From 36601fe27dfefae7f5c1221fbfd364c6d7def4b5 Mon Sep 17 00:00:00 2001
From: Andres Freund <and...@anarazel.de>
Date: Mon, 8 Jun 2020 15:25:49 -0700
Subject: [PATCH v1 1/4] spinlock emulation: Fix bug when more than INT_MAX
 spinlocks are initialized.

Once the counter goes negative we end up with spinlocks that error
out on first use (due to the check in tas_sema).

Author: Andres Freund
Discussion: https://postgr.es/m/20200606023103.avzrctgv7476x...@alap3.anarazel.de
Backpatch: 9.5-
---
 src/backend/storage/lmgr/spin.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 4d2a4c6641a..753943e46d6 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -106,7 +106,7 @@ SpinlockSemaInit(void)
 void
 s_init_lock_sema(volatile slock_t *lock, bool nested)
 {
-	static int	counter = 0;
+	static uint32 counter = 0;
 
 	*lock = ((++counter) % NUM_SPINLOCK_SEMAPHORES) + 1;
 }
-- 
2.25.0.114.g5b0ca878e0

>From 73d72693fc7a55ae327212f3932d2a45eb47d13b Mon Sep 17 00:00:00 2001
From: Andres Freund <and...@anarazel.de>
Date: Mon, 15 Jun 2020 18:23:10 -0700
Subject: [PATCH v1 2/4] Avoid potential spinlock use inside a signal handler
 for global barriers.

On platforms without support for 64bit atomic operations, where we also
cannot rely on 64bit reads to have single-copy atomicity, such atomics
are implemented using a spinlock-based fallback. That means it's not
safe to even read such atomics from within a signal handler (since the
signal handler might run while the spinlock is already held).

To avoid this issue, defer global barrier processing out of the signal
handler. Instead of checking the local / shared barrier generation to
determine whether to set ProcSignalBarrierPending, introduce
PROCSIG_BARRIER and always set ProcSignalBarrierPending when
receiving such a signal. Additionally, avoid redundant work in
ProcessProcSignalBarrier if ProcSignalBarrierPending is set unnecessarily.

Also do a small amount of other polishing.

Author: Andres Freund
Discussion: https://postgr.es/m/20200609193723.eu5ilsjxwdpyx...@alap3.anarazel.de
Backpatch: 13-, where the code was introduced.
---
 src/include/storage/procsignal.h     |  1 +
 src/backend/storage/ipc/procsignal.c | 87 ++++++++++++++++------------
 2 files changed, 52 insertions(+), 36 deletions(-)

diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h
index a0c0bc3ce55..5cb39697f38 100644
--- a/src/include/storage/procsignal.h
+++ b/src/include/storage/procsignal.h
@@ -33,6 +33,7 @@ typedef enum
 	PROCSIG_NOTIFY_INTERRUPT,	/* listen/notify interrupt */
 	PROCSIG_PARALLEL_MESSAGE,	/* message from cooperating parallel backend */
 	PROCSIG_WALSND_INIT_STOPPING,	/* ask walsenders to prepare for shutdown  */
+	PROCSIG_BARRIER,			/* global barrier interrupt  */
 
 	/* Recovery conflict reasons */
 	PROCSIG_RECOVERY_CONFLICT_DATABASE,
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index c809196d06a..4fa385b0ece 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -320,7 +320,7 @@ SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
 uint64
 EmitProcSignalBarrier(ProcSignalBarrierType type)
 {
-	uint64		flagbit = UINT64CONST(1) << (uint64) type;
+	uint32		flagbit = 1 << (uint32) type;
 	uint64		generation;
 
 	/*
@@ -363,7 +363,11 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
 		pid_t		pid = slot->pss_pid;
 
 		if (pid != 0)
+		{
+			/* see SendProcSignal for details */
+			slot->pss_signalFlags[PROCSIG_BARRIER] = true;
 			kill(pid, SIGUSR1);
+		}
 	}
 
 	return generation;
@@ -383,6 +387,8 @@ WaitForProcSignalBarrier(uint64 generation)
 {
 	long		timeout = 125L;
 
+	Assert(generation <= pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration));
+
 	for (int i = NumProcSignalSlots - 1; i >= 0; i--)
 	{
 		volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
@@ -417,6 +423,23 @@ WaitForProcSignalBarrier(uint64 generation)
 	pg_memory_barrier();
 }
 
+/*
+ * Handle receipt of an interrupt indicating a global barrier event.
+ *
+ * All the actual work is deferred to ProcessProcSignalBarrier(), because we
+ * cannot safely access the barrier generation inside the signal handler as
+ * 64bit atomics might use spinlock-based emulation, even for reads. As this
+ * routine only gets called when PROCSIG_BARRIER is sent, that won't cause a
+ * lot of unnecessary work.
+ */
+static void
+HandleProcSignalBarrierInterrupt(void)
+{
+	InterruptPending = true;
+	ProcSignalBarrierPending = true;
+	/* latch will be set by procsignal_sigusr1_handler */
+}
+
 /*
  * Perform global barrier related interrupt checking.
  *
@@ -428,22 +451,38 @@ WaitForProcSignalBarrier(uint64 generation)
 void
 ProcessProcSignalBarrier(void)
 {
-	uint64		generation;
+	uint64		local_gen;
+	uint64		shared_gen;
 	uint32		flags;
 
+	Assert(MyProcSignalSlot);
+
 	/* Exit quickly if there's no work to do. */
 	if (!ProcSignalBarrierPending)
 		return;
 	ProcSignalBarrierPending = false;
 
 	/*
-	 * Read the current barrier generation, and then get the flags that are
-	 * set for this backend. Note that pg_atomic_exchange_u32 is a full
-	 * barrier, so we're guaranteed that the read of the barrier generation
-	 * happens before we atomically extract the flags, and that any subsequent
-	 * state changes happen afterward.
+	 * It's not unlikely that we'll process multiple barriers at once, before
+	 * the signals for all of them have arrived. To avoid unnecessary work in
+	 * response to subsequent signals, exit early if we have already processed
+	 * all of them.
+	 */
+	local_gen = pg_atomic_read_u64(&MyProcSignalSlot->pss_barrierGeneration);
+	shared_gen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
+
+	Assert(local_gen <= shared_gen);
+
+	if (local_gen == shared_gen)
+		return;
+
+	/*
+	 * Get and clear the flags that are set for this backend. Note that
+	 * pg_atomic_exchange_u32 is a full barrier, so we're guaranteed that the
+	 * read of the barrier generation above happens before we atomically
+	 * extract the flags, and that any subsequent state changes happen
+	 * afterward.
 	 */
-	generation = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
 	flags = pg_atomic_exchange_u32(&MyProcSignalSlot->pss_barrierCheckMask, 0);
 
 	/*
@@ -466,7 +505,7 @@ ProcessProcSignalBarrier(void)
 	 * things have changed further, it'll get fixed up when this function is
 	 * next called.
 	 */
-	pg_atomic_write_u64(&MyProcSignalSlot->pss_barrierGeneration, generation);
+	pg_atomic_write_u64(&MyProcSignalSlot->pss_barrierGeneration, shared_gen);
 }
 
 static void
@@ -505,27 +544,6 @@ CheckProcSignal(ProcSignalReason reason)
 	return false;
 }
 
-/*
- * CheckProcSignalBarrier - check for new barriers we need to absorb
- */
-static bool
-CheckProcSignalBarrier(void)
-{
-	volatile ProcSignalSlot *slot = MyProcSignalSlot;
-
-	if (slot != NULL)
-	{
-		uint64		mygen;
-		uint64		curgen;
-
-		mygen = pg_atomic_read_u64(&slot->pss_barrierGeneration);
-		curgen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
-		return (mygen != curgen);
-	}
-
-	return false;
-}
-
 /*
  * procsignal_sigusr1_handler - handle SIGUSR1 signal.
  */
@@ -546,6 +564,9 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
 	if (CheckProcSignal(PROCSIG_WALSND_INIT_STOPPING))
 		HandleWalSndInitStopping();
 
+	if (CheckProcSignal(PROCSIG_BARRIER))
+		HandleProcSignalBarrierInterrupt();
+
 	if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_DATABASE))
 		RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_DATABASE);
 
@@ -564,12 +585,6 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
 	if (CheckProcSignal(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN))
 		RecoveryConflictInterrupt(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN);
 
-	if (CheckProcSignalBarrier())
-	{
-		InterruptPending = true;
-		ProcSignalBarrierPending = true;
-	}
-
 	SetLatch(MyLatch);
 
 	latch_sigusr1_handler();
-- 
2.25.0.114.g5b0ca878e0

>From a77798d9125e799d3a2cdc3e7a514fb869219ed7 Mon Sep 17 00:00:00 2001
From: Andres Freund <and...@anarazel.de>
Date: Mon, 8 Jun 2020 16:36:51 -0700
Subject: [PATCH v1 3/4] Add basic spinlock tests to regression tests.

As s_lock_test, the already existing test for spinlocks, isn't run in
an automated fashion (and doesn't test a normal backend environment),
it's worth adding tests that are run as part of a normal regression
run.

Currently the new tests are run as part of the pre-existing
test_atomic_ops() test. That can probably be quibbled about.

The only operations that s_lock_test tests but the new tests don't are
the detection of a stuck spinlock and S_LOCK_FREE (which is otherwise
unused).

This currently contains a test initializing more than INT_MAX spinlocks
(only run with --disable-spinlocks), to ensure the previous commit's fix
for a bug with more than INT_MAX spinlock initializations is correct, but
that might be too slow to enable generally.

It might be worth retiring s_lock_test after this. The added coverage
of a stuck spinlock probably isn't worth the added complexity?

Author: Andres Freund
Reviewed-By:
Discussion: https://postgr.es/m/
Backpatch:
---
 src/test/regress/regress.c | 89 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 960c155e5f2..a48f9de2532 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -794,6 +794,92 @@ test_atomic_uint64(void)
 	EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~0), 0);
 }
 
+static void
+test_spinlock(void)
+{
+	{
+		struct test_lock_struct
+		{
+			uint32		data_before;
+			slock_t		lock;
+			uint32		data_after;
+		} struct_w_lock;
+
+		struct_w_lock.data_before = 0x44;
+		struct_w_lock.data_after = 0x17;
+
+		/* test basic operations via the SpinLock* API */
+		SpinLockInit(&struct_w_lock.lock);
+		SpinLockAcquire(&struct_w_lock.lock);
+		SpinLockRelease(&struct_w_lock.lock);
+
+		/* test basic operations via underlying S_* API */
+		S_INIT_LOCK(&struct_w_lock.lock);
+		S_LOCK(&struct_w_lock.lock);
+		S_UNLOCK(&struct_w_lock.lock);
+
+		/* and that "contended" acquisition works */
+		s_lock(&struct_w_lock.lock, "testfile", 17, "testfunc");
+		S_UNLOCK(&struct_w_lock.lock);
+
+		/*
+		 * Check, using TAS directly, that a single spin cycle doesn't block
+		 * when acquiring an already acquired lock.
+		 */
+#ifdef TAS
+		S_LOCK(&struct_w_lock.lock);
+		if (!TAS(&struct_w_lock.lock))
+			elog(ERROR, "acquired already held spinlock");
+
+#ifdef TAS_SPIN
+		if (!TAS_SPIN(&struct_w_lock.lock))
+			elog(ERROR, "acquired already held spinlock");
+#endif							/* defined(TAS_SPIN) */
+
+		S_UNLOCK(&struct_w_lock.lock);
+#endif							/* defined(TAS) */
+
+		/*
+		 * Verify that after all of this the non-lock contents are still
+		 * correct.
+		 */
+		EXPECT_EQ_U32(struct_w_lock.data_before, 0x44);
+		EXPECT_EQ_U32(struct_w_lock.data_after, 0x17);
+	}
+
+	/*
+	 * Ensure that initializing more than INT32_MAX simulated spinlocks
+	 * works. This is probably too expensive to run in each regression test.
+	 */
+#ifndef HAVE_SPINLOCKS
+	{
+		/*
+		 * Initialize enough spinlocks to advance the counter close to
+		 * wraparound. It's too expensive to perform acquire/release for each,
+		 * as those may be syscalls when the spinlock emulation is used (and
+		 * even just atomic TAS would be expensive).
+		 */
+		for (uint32 i = 0; i < INT32_MAX - 100000; i++)
+		{
+			slock_t lock;
+
+			SpinLockInit(&lock);
+		}
+
+		for (uint32 i = 0; i < 200000; i++)
+		{
+			slock_t lock;
+
+			SpinLockInit(&lock);
+
+			SpinLockAcquire(&lock);
+			SpinLockRelease(&lock);
+			SpinLockAcquire(&lock);
+			SpinLockRelease(&lock);
+		}
+	}
+#endif
+}
 
 PG_FUNCTION_INFO_V1(test_atomic_ops);
 Datum
@@ -805,6 +891,9 @@ test_atomic_ops(PG_FUNCTION_ARGS)
 
 	test_atomic_uint64();
 
+	/* XXX: Is there a better location for this? */
+	test_spinlock();
+
 	PG_RETURN_BOOL(true);
 }
 
-- 
2.25.0.114.g5b0ca878e0

>From 5ce7f091400c03a93d10a979264041bba172d8b0 Mon Sep 17 00:00:00 2001
From: Andres Freund <and...@anarazel.de>
Date: Mon, 8 Jun 2020 16:50:37 -0700
Subject: [PATCH v1 4/4] Fix deadlock danger when atomic ops are done under
 spinlock.

This was a danger only for --disable-spinlocks in combination with
atomic operations unsupported by the current platform.

While atomics.c was careful to signal that a separate semaphore ought
to be used when spinlock emulation is active, spin.c didn't actually
implement that mechanism. That's my (Andres') fault; it seems to have
gotten lost during the development of the atomic operations support.

Fix that issue and add a test for nesting atomic operations inside a
spinlock.

Author: Andres Freund
Discussion: https://postgr.es/m/20200605023302.g6v3ydozy5txi...@alap3.anarazel.de
Backpatch: 9.5-
---
 src/backend/storage/lmgr/spin.c | 95 +++++++++++++++++++++++----------
 src/test/regress/regress.c      | 43 +++++++++++++++
 2 files changed, 109 insertions(+), 29 deletions(-)

diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 753943e46d6..141606496eb 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -28,8 +28,24 @@
 
 
 #ifndef HAVE_SPINLOCKS
+
+/*
+ * No TAS, so spinlocks are implemented as PGSemaphores.
+ */
+
+#ifndef HAVE_ATOMICS
+#define NUM_SIMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
+#else
+#define NUM_SIMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
+#endif /* HAVE_ATOMICS */
+
 PGSemaphore *SpinlockSemaArray;
-#endif
+
+#else							/* !HAVE_SPINLOCKS */
+
+#define NUM_SIMULATION_SEMAPHORES 0
+
+#endif							/* HAVE_SPINLOCKS */
 
 /*
  * Report the amount of shared memory needed to store semaphores for spinlock
@@ -38,34 +54,19 @@ PGSemaphore *SpinlockSemaArray;
 Size
 SpinlockSemaSize(void)
 {
-	return SpinlockSemas() * sizeof(PGSemaphore);
+	return NUM_SIMULATION_SEMAPHORES * sizeof(PGSemaphore);
 }
 
-#ifdef HAVE_SPINLOCKS
-
 /*
  * Report number of semaphores needed to support spinlocks.
  */
 int
 SpinlockSemas(void)
 {
-	return 0;
+	return NUM_SIMULATION_SEMAPHORES;
 }
-#else							/* !HAVE_SPINLOCKS */
 
-/*
- * No TAS, so spinlocks are implemented as PGSemaphores.
- */
-
-
-/*
- * Report number of semaphores needed to support spinlocks.
- */
-int
-SpinlockSemas(void)
-{
-	return NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES;
-}
+#ifndef HAVE_SPINLOCKS
 
 /*
  * Initialize spinlock emulation.
@@ -92,23 +93,59 @@ SpinlockSemaInit(void)
 /*
  * s_lock.h hardware-spinlock emulation using semaphores
  *
- * We map all spinlocks onto a set of NUM_SPINLOCK_SEMAPHORES semaphores.
- * It's okay to map multiple spinlocks onto one semaphore because no process
- * should ever hold more than one at a time.  We just need enough semaphores
- * so that we aren't adding too much extra contention from that.
+ * We map all spinlocks onto NUM_SIMULATION_SEMAPHORES semaphores.  It's okay to
+ * map multiple spinlocks onto one semaphore because no process should ever
+ * hold more than one at a time.  We just need enough semaphores so that we
+ * aren't adding too much extra contention from that.
+ *
+ * There is one exception to the restriction of only holding one spinlock at a
+ * time, which is that it's ok if emulated atomic operations are nested inside
+ * spinlocks. To avoid the danger of spinlocks and atomics using the same sema,
+ * we make sure "normal" spinlocks and atomics backed by spinlocks use
+ * distinct semaphores (see the nested argument to s_init_lock_sema).
  *
  * slock_t is just an int for this implementation; it holds the spinlock
- * number from 1..NUM_SPINLOCK_SEMAPHORES.  We intentionally ensure that 0
+ * number from 1..NUM_SIMULATION_SEMAPHORES.  We intentionally ensure that 0
  * is not a valid value, so that testing with this code can help find
  * failures to initialize spinlocks.
  */
 
+static inline void
+s_check_valid(int lockndx)
+{
+	if (unlikely(lockndx <= 0 || lockndx > NUM_SIMULATION_SEMAPHORES))
+		elog(ERROR, "invalid spinlock number: %d", lockndx);
+}
+
 void
 s_init_lock_sema(volatile slock_t *lock, bool nested)
 {
 	static uint32 counter = 0;
+	uint32		offset;
+	uint32		sema_total;
+	uint32		idx;
 
-	*lock = ((++counter) % NUM_SPINLOCK_SEMAPHORES) + 1;
+	if (nested)
+	{
+		/*
+		 * To allow nesting atomics inside spinlocked sections, use a
+		 * different spinlock. See comment above.
+		 */
+		offset = 1 + NUM_SPINLOCK_SEMAPHORES;
+		sema_total = NUM_ATOMICS_SEMAPHORES;
+	}
+	else
+	{
+		offset = 1;
+		sema_total = NUM_SPINLOCK_SEMAPHORES;
+	}
+
+	idx = (counter++ % sema_total) + offset;
+
+	/* double check we did things correctly */
+	s_check_valid(idx);
+
+	*lock = idx;
 }
 
 void
@@ -116,8 +153,8 @@ s_unlock_sema(volatile slock_t *lock)
 {
 	int			lockndx = *lock;
 
-	if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES)
-		elog(ERROR, "invalid spinlock number: %d", lockndx);
+	s_check_valid(lockndx);
+
 	PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
 }
 
@@ -134,8 +171,8 @@ tas_sema(volatile slock_t *lock)
 {
 	int			lockndx = *lock;
 
-	if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES)
-		elog(ERROR, "invalid spinlock number: %d", lockndx);
+	s_check_valid(lockndx);
+
 	/* Note that TAS macros return 0 if *success* */
 	return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
 }
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index a48f9de2532..231aab9d569 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -794,6 +794,47 @@ test_atomic_uint64(void)
 	EXPECT_EQ_U64(pg_atomic_fetch_and_u64(&var, ~0), 0);
 }
 
+/*
+ * Verify that performing atomic ops inside a spinlock isn't a
+ * problem. Realistically that's only going to be a problem when both
+ * --disable-spinlocks and --disable-atomics are used, but it's cheap enough
+ * to just always test.
+ *
+ * The test works by holding a spinlock while manipulating more than
+ * NUM_SPINLOCK_SEMAPHORES atomics, so that any overlap between the semaphore
+ * backing the spinlock and one backing an atomic would self-deadlock.
+ *
+ * NUM_TEST_ATOMICS doesn't really need to be more than
+ * NUM_SPINLOCK_SEMAPHORES, but it seems better to test a bit more
+ * extensively.
+ */
+static void
+test_atomic_spin_nest(void)
+{
+	slock_t lock;
+#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27)
+	pg_atomic_uint32 atomics[NUM_TEST_ATOMICS];
+
+	SpinLockInit(&lock);
+
+	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+		pg_atomic_init_u32(&atomics[i], 0);
+
+	/* just so it's not all zeroes */
+	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+		EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics[i], i), 0);
+
+	/* test whether we can do atomic op with lock held */
+	SpinLockAcquire(&lock);
+	for (int i = 0; i < NUM_TEST_ATOMICS; i++)
+	{
+		EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics[i], i), i);
+		EXPECT_EQ_U32(pg_atomic_read_u32(&atomics[i]), 0);
+	}
+	SpinLockRelease(&lock);
+}
+#undef NUM_TEST_ATOMICS
+
 static void
 test_spinlock(void)
 {
@@ -891,6 +932,8 @@ test_atomic_ops(PG_FUNCTION_ARGS)
 
 	test_atomic_uint64();
 
+	test_atomic_spin_nest();
+
 	/* XXX: Is there a better location for this? */
 	test_spinlock();
 
-- 
2.25.0.114.g5b0ca878e0
