Andy Fan <zhihuifan1...@163.com> writes:

> I found a special case about checking it in errstart. So commit 3 in v7
> is added. 
>
> commit 757c67c1d4895ce6a523bcf5217af8eb2351e2a1 (HEAD -> s_stuck_v3)
> Author: yizhi.fzh <yizhi....@alibaba-inc.com>
> Date:   Mon Jan 22 07:14:29 2024 +0800
>
>     Bypass SpinLock checking in SIGQUIT signal handler
>     

I used sigismember(&BlockSig, SIGQUIT) to detect whether a process is doing a
quickdie, but that approach is bad not only because it doesn't work on
Windows, but also because its performance is too poor, even though it only
affects USE_ASSERT_CHECKING builds. In v8, I introduced a new global
variable, quickDieInProgress, to handle this instead.
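
To make it concrete, the difference inside VerifyNoSpinLocksHeld() boils down
to something like this (just a sketch; the real change is in patch 3):

    /* v7: doesn't work on Windows, and too slow to run on every call */
    if (sigismember(&BlockSig, SIGQUIT))
        return;

    /* v8: a plain global flag, set once in quickdie() */
    if (quickDieInProgress)
        return;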

-- 
Best Regards
Andy Fan

>From b6bb33994f479824a8fac6a1d076a103c16e9d69 Mon Sep 17 00:00:00 2001
From: "yizhi.fzh" <yizhi....@alibaba-inc.com>
Date: Fri, 19 Jan 2024 13:52:07 +0800
Subject: [PATCH v8 1/3] Detect more misuse of spin locks automatically

Spin locks are intended for *very* short-term locking, but they can be
misused in many ways, e.g. acquiring another LWLock or a regular lock,
allocating memory, or calling errstart while holding a spin lock.  This
patch detects such misuse automatically in a USE_ASSERT_CHECKING build.

CHECK_FOR_INTERRUPTS should be avoided as well while holding a spin lock.
Depending on which signals are pending, PG may raise an ERROR or FATAL,
which makes the code jump somewhere else with no way to release the spin
lock.
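
As a hypothetical illustration (shared->mutex is a made-up lock), code like
the following now fails with a PANIC in a USE_ASSERT_CHECKING build:

    SpinLockAcquire(&shared->mutex);     /* records __FILE__ / __LINE__ */
    elog(LOG, "updating shared state");  /* errstart() -> VerifyNoSpinLocksHeld() */
    SpinLockRelease(&shared->mutex);     /* not reached in an assert build */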
---
 src/backend/storage/lmgr/lock.c   |  6 ++++
 src/backend/storage/lmgr/lwlock.c |  6 ++++
 src/backend/storage/lmgr/spin.c   | 13 +++++++++
 src/backend/utils/error/elog.c    |  7 +++++
 src/backend/utils/mmgr/mcxt.c     | 16 +++++++++++
 src/include/miscadmin.h           | 12 +++++++-
 src/include/storage/spin.h        | 46 +++++++++++++++++++++++++++++--
 7 files changed, 102 insertions(+), 4 deletions(-)

diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index c70a1adb9a..cb9969b860 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -776,6 +776,12 @@ LockAcquireExtended(const LOCKTAG *locktag,
 	bool		found_conflict;
 	bool		log_lock = false;
 
+	/*
+	 * A spin lock should not be held for long, but the work needed here may
+	 * take a while, so make sure no spin lock is held at this point.
+	 */
+	VerifyNoSpinLocksHeld();
+
 	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
 		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 	lockMethodTable = LockMethods[lockmethodid];
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 2f2de5a562..1a24687394 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1209,6 +1209,12 @@ LWLockAcquire(LWLock *lock, LWLockMode mode)
 
 	Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
 
+	/*
+	 * A spin lock should not be held for long, but the work needed here may
+	 * take a while, so make sure no spin lock is held at this point.
+	 */
+	VerifyNoSpinLocksHeld();
+
 	PRINT_LWDEBUG("LWLockAcquire", lock, mode);
 
 #ifdef LWLOCK_STATS
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 50cb99cd3b..08cc6da5d9 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -47,6 +47,9 @@ PGSemaphore *SpinlockSemaArray;
 
 #endif							/* HAVE_SPINLOCKS */
 
+volatile const char *last_spin_lock_file = NULL;
+volatile int last_spin_lock_lineno = -1;
+
 /*
  * Report the amount of shared memory needed to store semaphores for spinlock
  * support.
@@ -178,3 +181,13 @@ tas_sema(volatile slock_t *lock)
 }
 
 #endif							/* !HAVE_SPINLOCKS */
+
+void
+VerifyNoSpinLocksHeld(void)
+{
+#ifdef USE_ASSERT_CHECKING
+	if (last_spin_lock_file != NULL)
+		elog(PANIC, "A spin lock has been held at %s:%d",
+			 last_spin_lock_file, last_spin_lock_lineno);
+#endif
+}
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 2c7a20e3d3..22662955d2 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -351,6 +351,13 @@ errstart(int elevel, const char *domain)
 	bool		output_to_client = false;
 	int			i;
 
+	/*
+	 * Logging can happen in many places without much attention being paid
+	 * to locking, and it takes far more than a few dozen instructions, so
+	 * it must only be reached while no spin lock is held.
+	 */
+	VerifyNoSpinLocksHeld();
+
 	/*
 	 * Check some cases in which we want to promote an error into a more
 	 * severe error.  None of this logic applies for non-error messages.
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 1336944084..7a14e347aa 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -1028,6 +1028,13 @@ MemoryContextAlloc(MemoryContext context, Size size)
 	if (!AllocSizeIsValid(size))
 		elog(ERROR, "invalid memory alloc request size %zu", size);
 
+	/*
+	 * Memory allocation can happen in many places without much attention
+	 * being paid to locking, and it takes far more than a few dozen
+	 * instructions, so it must only be called while no spin lock is held.
+	 */
+	VerifyNoSpinLocksHeld();
+
 	context->isReset = false;
 
 	ret = context->methods->alloc(context, size);
@@ -1071,6 +1078,9 @@ MemoryContextAllocZero(MemoryContext context, Size size)
 	if (!AllocSizeIsValid(size))
 		elog(ERROR, "invalid memory alloc request size %zu", size);
 
+	/* see comments in MemoryContextAlloc. */
+	VerifyNoSpinLocksHeld();
+
 	context->isReset = false;
 
 	ret = context->methods->alloc(context, size);
@@ -1197,6 +1207,9 @@ palloc(Size size)
 	if (!AllocSizeIsValid(size))
 		elog(ERROR, "invalid memory alloc request size %zu", size);
 
+	/* see comments in MemoryContextAlloc. */
+	VerifyNoSpinLocksHeld();
+
 	context->isReset = false;
 
 	ret = context->methods->alloc(context, size);
@@ -1228,6 +1241,9 @@ palloc0(Size size)
 	if (!AllocSizeIsValid(size))
 		elog(ERROR, "invalid memory alloc request size %zu", size);
 
+	/* see comments in MemoryContextAlloc. */
+	VerifyNoSpinLocksHeld();
+
 	context->isReset = false;
 
 	ret = context->methods->alloc(context, size);
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 0b01c1f093..26edf041ca 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -107,6 +107,9 @@ extern PGDLLIMPORT volatile uint32 CritSectionCount;
 /* in tcop/postgres.c */
 extern void ProcessInterrupts(void);
 
+/* in storage/spin.h */
+extern void VerifyNoSpinLocksHeld(void);
+
 /* Test whether an interrupt is pending */
 #ifndef WIN32
 #define INTERRUPTS_PENDING_CONDITION() \
@@ -117,9 +120,16 @@ extern void ProcessInterrupts(void);
 	 unlikely(InterruptPending))
 #endif
 
-/* Service interrupt, if one is pending and it's safe to service it now */
+/*
+ * Service interrupt, if one is pending and it's safe to service it now.
+ *
+ * Spin locks have no infrastructure to release them on ERROR, and
+ * ProcessInterrupts may raise an ERROR or FATAL that jumps elsewhere,
+ * leaking the spin lock.  Make sure no spin lock is held at this point.
+ */
 #define CHECK_FOR_INTERRUPTS() \
 do { \
+	VerifyNoSpinLocksHeld(); \
 	if (INTERRUPTS_PENDING_CONDITION()) \
 		ProcessInterrupts(); \
 } while(0)
diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h
index c0679c5999..b69424e9c8 100644
--- a/src/include/storage/spin.h
+++ b/src/include/storage/spin.h
@@ -59,12 +59,52 @@
 
 #define SpinLockInit(lock)	S_INIT_LOCK(lock)
 
-#define SpinLockAcquire(lock) S_LOCK(lock)
+extern PGDLLIMPORT volatile const char *last_spin_lock_file;
+extern PGDLLIMPORT volatile int last_spin_lock_lineno;
+extern void VerifyNoSpinLocksHeld(void);
 
-#define SpinLockRelease(lock) S_UNLOCK(lock)
+#ifdef USE_ASSERT_CHECKING
 
-#define SpinLockFree(lock)	S_LOCK_FREE(lock)
+/*
+ * START_SPIN_LOCK - record the start of a spin lock acquisition.
+ *
+ * Acquiring another spin lock while one is already held is not
+ * allowed.
+ */
+#define START_SPIN_LOCK() \
+do \
+{ \
+	VerifyNoSpinLocksHeld(); \
+	last_spin_lock_file = __FILE__; \
+	last_spin_lock_lineno = __LINE__; \
+} while (0)
+
+#define END_SPIN_LOCK() \
+do \
+{ \
+	last_spin_lock_file = NULL; \
+	last_spin_lock_lineno = -1; \
+} while (0)
+#else
+#define START_SPIN_LOCK() ((void) true)
+#define END_SPIN_LOCK() ((void) true)
+#endif
+
+#define SpinLockAcquire(lock) \
+do \
+{ \
+	START_SPIN_LOCK(); \
+	S_LOCK(lock); \
+} while (false)
 
+#define SpinLockRelease(lock) \
+do \
+{ \
+	S_UNLOCK(lock); \
+	END_SPIN_LOCK(); \
+} while (false)
+
+#define SpinLockFree(lock)	S_LOCK_FREE(lock)
 
 extern int	SpinlockSemas(void);
 extern Size SpinlockSemaSize(void);
-- 
2.34.1

>From f09518df76572adca85cba5008ea0cae5074603a Mon Sep 17 00:00:00 2001
From: "yizhi.fzh" <yizhi....@alibaba-inc.com>
Date: Fri, 19 Jan 2024 13:57:46 +0800
Subject: [PATCH v8 2/3] Treat (un)LockBufHdr as a SpinLock.

LockBufHdr also uses the init_local_spin_delay / perform_spin_delay
infrastructure, so it can likewise PANIC the system when the lock cannot
be acquired within a short time, and its code is quite similar to s_lock.
So treat it the same as a spin lock with regard to spinlock misuse
detection.
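
For reference, the locking loop in LockBufHdr() has the same shape as
s_lock() (paraphrased from bufmgr.c; see the source for the authoritative
version):

    init_local_spin_delay(&delayStatus);
    while (true)
    {
        /* try to set the BM_LOCKED flag atomically */
        old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
        if (!(old_buf_state & BM_LOCKED))
            break;                          /* got the lock */
        perform_spin_delay(&delayStatus);   /* PANICs if stuck for too long */
    }
    finish_spin_delay(&delayStatus);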
---
 src/backend/storage/buffer/bufmgr.c | 1 +
 src/include/storage/buf_internals.h | 1 +
 2 files changed, 2 insertions(+)

diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 7d601bef6d..c600a113cf 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -5409,6 +5409,7 @@ LockBufHdr(BufferDesc *desc)
 
 	init_local_spin_delay(&delayStatus);
 
+	START_SPIN_LOCK();
 	while (true)
 	{
 		/* set BM_LOCKED flag */
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index f190e6e5e4..6dc444da49 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -363,6 +363,7 @@ UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
 {
 	pg_write_barrier();
 	pg_atomic_write_u32(&desc->state, buf_state & (~BM_LOCKED));
+	END_SPIN_LOCK();
 }
 
 /* in bufmgr.c */
-- 
2.34.1

>From bed5806c3adb3a3da3497f61ccf7668867ffb85a Mon Sep 17 00:00:00 2001
From: "yizhi.fzh" <yizhi....@alibaba-inc.com>
Date: Mon, 22 Jan 2024 07:14:29 +0800
Subject: [PATCH v8 3/3] Bypass SpinLock checking in SIGQUIT signal handler

When a process receives a SIGQUIT signal, it means another process has
crashed and the whole system is being taken down.  The receiving process
may happen to be holding a spin lock at that moment, and quickdie()
reports a message through ereport(), so under the new rules errstart()
would PANIC with a spinlock-misuse message, which is quite misleading.
So bypass the spin-lock-held check in this case.  That is safe because
the whole system is about to be restarted anyway.
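
A hypothetical timeline (sketch only; shmem_lock is a made-up lock):

    SpinLockAcquire(&shmem_lock);   /* last_spin_lock_file is now set */
    /*
     * SIGQUIT is delivered here; quickdie() calls ereport(), which reaches
     * errstart() -> VerifyNoSpinLocksHeld().  Without the bypass, that would
     * PANIC with a "misuse" message instead of reporting the real crash.
     */
    SpinLockRelease(&shmem_lock);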
---
 src/backend/storage/lmgr/spin.c |  6 ++++++
 src/backend/tcop/postgres.c     | 12 ++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 08cc6da5d9..006975bafa 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -26,6 +26,7 @@
 #include "storage/shmem.h"
 #include "storage/spin.h"
 
+extern PGDLLIMPORT volatile bool quickDieInProgress;
 
 #ifndef HAVE_SPINLOCKS
 
@@ -186,6 +187,11 @@ void
 VerifyNoSpinLocksHeld(void)
 {
 #ifdef USE_ASSERT_CHECKING
+	/*
+	 * While quickdie() is in progress, it's OK to skip the spin lock check.
+	 */
+	if (quickDieInProgress)
+		return;
 	if (last_spin_lock_file != NULL)
 		elog(PANIC, "A spin lock has been held at %s:%d",
 			 last_spin_lock_file, last_spin_lock_lineno);
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 1a34bd3715..11103835a6 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -102,6 +102,8 @@ int			PostAuthDelay = 0;
 /* Time between checks that the client is still connected. */
 int			client_connection_check_interval = 0;
 
+volatile bool quickDieInProgress = false;
+
 /* ----------------
  *		private typedefs etc
  * ----------------
@@ -2876,6 +2878,16 @@ quickdie(SIGNAL_ARGS)
 	sigaddset(&BlockSig, SIGQUIT);	/* prevent nested calls */
 	sigprocmask(SIG_SETMASK, &BlockSig, NULL);
 
+	/*
+	 * We could check BlockSig with sigismember() to tell that a quickdie
+	 * is in progress, but that turned out to be too expensive in testing,
+	 * so set quickDieInProgress instead.
+	 *
+	 * Once this is set, this backend is certain to die very soon, so there
+	 * is no need to ever reset it back to false.
+	 */
+	quickDieInProgress = true;
+
 	/*
 	 * Prevent interrupts while exiting; though we just blocked signals that
 	 * would queue new interrupts, one may have been pending.  We don't want a
-- 
2.34.1
