On Wed, 3 Mar 2021 at 20:50, David Steele <da...@pgmasters.net> wrote:

> On 1/22/21 6:02 AM, Peter Eisentraut wrote:
>
> This patch set no longer applies:
> http://cfbot.cputube.org/patch_32_2927.log.
>
> Can we get a rebase? Also marked Waiting on Author.
>

Rebased as requested.

I'm still interested in whether Andres will be able to do anything about
identifying LWLocks in a cross-backend manner. But this work doesn't really
depend on that; it would benefit from it, but it could easily be adapted to
use it later if needed.
From 36c7ddcbca2dbbcb2967f01cb92aa1f61620c838 Mon Sep 17 00:00:00 2001
From: Craig Ringer <craig.rin...@2ndquadrant.com>
Date: Thu, 19 Nov 2020 17:38:45 +0800
Subject: [PATCH 1/4] Pass the target LWLock* and tranche ID to LWLock
 tracepoints

Previously the TRACE_POSTGRESQL_LWLOCK_ tracepoints only received a
pointer to the LWLock tranche name. This made it impossible to identify
individual locks.

Passing the lock pointer itself isn't perfect. If the lock is allocated inside
a DSM segment then it might be mapped at a different address in different
backends. It's safe to compare lock pointers between backends (assuming
!EXEC_BACKEND) if they're in the individual lock tranches or an
extension-requested named tranche, but not necessarily for tranches in
BuiltinTrancheIds or tranches >= LWTRANCHE_FIRST_USER_DEFINED that were
directly assigned with LWLockNewTrancheId(). Still, it's better than nothing;
the pointer is stable within a backend, and usually between backends.
---
 src/backend/storage/lmgr/lwlock.c | 35 +++++++++++++++++++------------
 src/backend/utils/probes.d        | 18 +++++++++-------
 2 files changed, 32 insertions(+), 21 deletions(-)

diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 8cb6a6f042..5c8744d316 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1321,7 +1321,8 @@ LWLockAcquire(LWLock *lock, LWLockMode mode)
 #endif
 
 		LWLockReportWaitStart(lock);
-		TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
+		TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode, lock,
+				lock->tranche);
 
 		for (;;)
 		{
@@ -1343,7 +1344,8 @@ LWLockAcquire(LWLock *lock, LWLockMode mode)
 		}
 #endif
 
-		TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
+		TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode, lock,
+				lock->tranche);
 		LWLockReportWaitEnd();
 
 		LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
@@ -1352,7 +1354,7 @@ LWLockAcquire(LWLock *lock, LWLockMode mode)
 		result = false;
 	}
 
-	TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
+	TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode, lock, lock->tranche);
 
 	/* Add lock to list of locks held by this backend */
 	held_lwlocks[num_held_lwlocks].lock = lock;
@@ -1403,14 +1405,16 @@ LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
 		RESUME_INTERRUPTS();
 
 		LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
-		TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
+		TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode, lock,
+				lock->tranche);
 	}
 	else
 	{
 		/* Add lock to list of locks held by this backend */
 		held_lwlocks[num_held_lwlocks].lock = lock;
 		held_lwlocks[num_held_lwlocks++].mode = mode;
-		TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
+		TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode, lock,
+				lock->tranche);
 	}
 	return !mustwait;
 }
@@ -1482,7 +1486,8 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 #endif
 
 			LWLockReportWaitStart(lock);
-			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
+			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode, lock,
+					lock->tranche);
 
 			for (;;)
 			{
@@ -1500,7 +1505,8 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 				Assert(nwaiters < MAX_BACKENDS);
 			}
 #endif
-			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
+			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode, lock,
+					lock->tranche);
 			LWLockReportWaitEnd();
 
 			LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
@@ -1530,7 +1536,8 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 		/* Failed to get lock, so release interrupt holdoff */
 		RESUME_INTERRUPTS();
 		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
-		TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
+		TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode, lock,
+				lock->tranche);
 	}
 	else
 	{
@@ -1538,7 +1545,8 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 		/* Add lock to list of locks held by this backend */
 		held_lwlocks[num_held_lwlocks].lock = lock;
 		held_lwlocks[num_held_lwlocks++].mode = mode;
-		TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
+		TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode, lock,
+				lock->tranche);
 	}
 
 	return !mustwait;
@@ -1698,7 +1706,8 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
 #endif
 
 		LWLockReportWaitStart(lock);
-		TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);
+		TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE, lock,
+				lock->tranche);
 
 		for (;;)
 		{
@@ -1717,7 +1726,7 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
 		}
 #endif
 
-		TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
+		TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE, lock, lock->tranche);
 		LWLockReportWaitEnd();
 
 		LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
@@ -1843,6 +1852,8 @@ LWLockRelease(LWLock *lock)
 	/* nobody else can have that kind of lock */
 	Assert(!(oldstate & LW_VAL_EXCLUSIVE));
 
+	/* Released, though not woken yet. All releases must fire this. */
+	TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock), mode, lock, lock->tranche);
 
 	/*
 	 * We're still waiting for backends to get scheduled, don't wake them up
@@ -1866,8 +1877,6 @@ LWLockRelease(LWLock *lock)
 		LWLockWakeup(lock);
 	}
 
-	TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));
-
 	/*
 	 * Now okay to allow cancel/die interrupts.
 	 */
diff --git a/src/backend/utils/probes.d b/src/backend/utils/probes.d
index b0c50a3c7f..363839f85d 100644
--- a/src/backend/utils/probes.d
+++ b/src/backend/utils/probes.d
@@ -17,6 +17,7 @@
 #define LocalTransactionId unsigned int
 #define LWLockMode int
 #define LOCKMODE int
+#define LWLock void
 #define BlockNumber unsigned int
 #define Oid unsigned int
 #define ForkNumber int
@@ -28,14 +29,15 @@ provider postgresql {
 	probe transaction__commit(LocalTransactionId);
 	probe transaction__abort(LocalTransactionId);
 
-	probe lwlock__acquire(const char *, LWLockMode);
-	probe lwlock__release(const char *);
-	probe lwlock__wait__start(const char *, LWLockMode);
-	probe lwlock__wait__done(const char *, LWLockMode);
-	probe lwlock__condacquire(const char *, LWLockMode);
-	probe lwlock__condacquire__fail(const char *, LWLockMode);
-	probe lwlock__acquire__or__wait(const char *, LWLockMode);
-	probe lwlock__acquire__or__wait__fail(const char *, LWLockMode);
+	probe lwlock__acquire(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__release(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__wait__start(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__wait__done(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__condacquire(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__condacquire__fail(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__acquire__or__wait(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__acquire__or__wait__fail(const char *, LWLockMode, LWLock*, int);
+
 
 	probe lock__wait__start(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
 	probe lock__wait__done(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
-- 
2.29.2
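
As a usage sketch (untested, and not part of the patch): with these extra
arguments the probes can be consumed from SystemTap roughly like this. The
binary path is just an example, and it assumes a build configured with
--enable-dtrace. Argument order follows the updated probes.d above: tranche
name, mode, LWLock pointer, tranche ID.

# lwlock_waits.stp -- print each LWLock wait with its lock address and tranche
probe process("/usr/local/pgsql/bin/postgres").mark("lwlock__wait__start")
{
    # $arg1 = tranche name, $arg2 = LWLockMode, $arg3 = LWLock*, $arg4 = tranche id
    printf("%d wait start: %s mode=%d lock=%p tranche=%d\n",
           pid(), user_string($arg1), $arg2, $arg3, $arg4)
}

probe process("/usr/local/pgsql/bin/postgres").mark("lwlock__wait__done")
{
    printf("%d wait done:  %s mode=%d lock=%p tranche=%d\n",
           pid(), user_string($arg1), $arg2, $arg3, $arg4)
}

Without the lock pointer and tranche ID, two waits on different locks in the
same tranche were indistinguishable here.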

From 16a9da60cdf7b8eaf2f5fed82e4ab79cfdf33a6a Mon Sep 17 00:00:00 2001
From: Craig Ringer <craig.rin...@2ndquadrant.com>
Date: Mon, 11 Jan 2021 10:33:10 +0800
Subject: [PATCH 4/4] SDT tracepoints for backend type and postmaster pid on
 startup

For collecting stats on things like LWLock activity it's useful to be able
to identify a backend reliably, even when no debuginfo is available.
---
 src/backend/utils/init/miscinit.c  | 5 +++++
 src/backend/utils/misc/guc.c       | 3 +++
 src/backend/utils/misc/ps_status.c | 9 +++++++++
 src/backend/utils/probes.d         | 5 +++++
 4 files changed, 22 insertions(+)

diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 8b73850d0d..01b5cccf85 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -233,6 +233,11 @@ GetBackendTypeDesc(BackendType backendType)
 {
 	const char *backendDesc = "unknown process type";
 
+	if (MyProcPid == PostmasterPid)
+		return "postmaster";
+	else if (!IsUnderPostmaster)
+		return "standalone";
+
 	switch (backendType)
 	{
 		case B_INVALID:
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index e337df42cb..b34cfcefdb 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -95,6 +95,7 @@
 #include "utils/pg_lsn.h"
 #include "utils/plancache.h"
 #include "utils/portal.h"
+#include "utils/probes.h"
 #include "utils/ps_status.h"
 #include "utils/rls.h"
 #include "utils/snapmgr.h"
@@ -11834,6 +11835,8 @@ assign_application_name(const char *newval, void *extra)
 {
 	/* Update the pg_stat_activity view */
 	pgstat_report_appname(newval);
+
+	TRACE_POSTGRESQL_GUC_APPLICATION_NAME_ASSIGNED(newval);
 }
 
 static bool
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 5819faaf2d..f6ea64a233 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -31,6 +31,7 @@
 #include "pgstat.h"
 #include "utils/guc.h"
 #include "utils/ps_status.h"
+#include "utils/probes.h"
 
 extern char **environ;
 bool		update_process_title = true;
@@ -265,6 +266,14 @@ init_ps_display(const char *fixed_part)
 	if (!fixed_part)
 		fixed_part = GetBackendTypeDesc(MyBackendType);
 
+	/*
+	 * init_ps_display() is a useful time to report the backend type to
+	 * tracing tools, since otherwise the probe would have to appear in
+	 * many different main loops for different backend types.
+	 */
+	TRACE_POSTGRESQL_BACKEND_TYPE(MyBackendType, fixed_part);
+	TRACE_POSTGRESQL_POSTMASTER_PID(PostmasterPid);
+
 #ifndef PS_USE_NONE
 	/* no ps display for stand-alone backend */
 	if (!IsUnderPostmaster)
diff --git a/src/backend/utils/probes.d b/src/backend/utils/probes.d
index 608a89af91..0157c3432f 100644
--- a/src/backend/utils/probes.d
+++ b/src/backend/utils/probes.d
@@ -101,4 +101,9 @@ provider postgresql {
 	probe wal__switch();
 	probe wal__buffer__write__dirty__start();
 	probe wal__buffer__write__dirty__done();
+
+	/* Probes for startup and process info, useful when tracing a group of processes */
+	probe backend__type(int backend_type, const char * backend_type_description);
+	probe postmaster__pid(int postmaster_pid);
+	probe guc__application__name__assigned(const char *newval);
 };
-- 
2.29.2
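
A rough sketch of how these startup probes can be used (untested, and the
binary path is again just an example): label each traced pid with its backend
type as the process starts, with no debuginfo needed.

# backend_types.stp -- map pids to backend types as processes start
global backend_type

probe process("/usr/local/pgsql/bin/postgres").mark("backend__type")
{
    # $arg1 = BackendType enum value, $arg2 = description string
    backend_type[pid()] = user_string($arg2)
    printf("pid %d is a %s (backend type %d)\n", pid(), backend_type[pid()], $arg1)
}

probe process("/usr/local/pgsql/bin/postgres").mark("postmaster__pid")
{
    printf("pid %d reports postmaster pid %d\n", pid(), $arg1)
}

The backend_type map can then be used by other probes in the same script to
tag LWLock or I/O events with the type of process that produced them.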

From c358e8fee0c01d2d459365b93a04e57d945c9cfe Mon Sep 17 00:00:00 2001
From: Craig Ringer <craig.rin...@2ndquadrant.com>
Date: Thu, 19 Nov 2020 17:30:47 +0800
Subject: [PATCH 3/4] Comments on LWLock tranches

---
 src/backend/storage/lmgr/lwlock.c | 49 +++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 3 deletions(-)

diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 4c65a30c76..26ea3e64a9 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -112,11 +112,14 @@ extern slock_t *ShmemLock;
  *
  * 1. The individually-named locks defined in lwlocknames.h each have their
  * own tranche.  The names of these tranches appear in IndividualLWLockNames[]
- * in lwlocknames.c.
+ * in lwlocknames.c. The LWLock structs are allocated in MainLWLockArray.
  *
  * 2. There are some predefined tranches for built-in groups of locks.
  * These are listed in enum BuiltinTrancheIds in lwlock.h, and their names
- * appear in BuiltinTrancheNames[] below.
+ * appear in BuiltinTrancheNames[] below. The LWLock structs are allocated
+ * elsewhere under the control of the subsystem that manages the tranche. The
+ * LWLock code does not know or care where in shared memory they are allocated
+ * or how many there are in a tranche.
  *
  * 3. Extensions can create new tranches, via either RequestNamedLWLockTranche
  * or LWLockRegisterTranche.  The names of these that are known in the current
@@ -196,6 +199,13 @@ static int	LWLockTrancheNamesAllocated = 0;
  * This points to the main array of LWLocks in shared memory.  Backends inherit
  * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
  * where we have special measures to pass it down).
+ *
+ * This array holds individual LWLocks and LWLocks allocated in named tranches.
+ *
+ * It does not hold locks for any LWLock that's separately initialized with
+ * LWLockInitialize(). Locks in tranches listed in BuiltinTrancheIds or
+ * allocated with LWLockNewTrancheId() can be embedded in other structs
+ * anywhere in shared memory.
  */
 LWLockPadded *MainLWLockArray = NULL;
 
@@ -592,6 +602,12 @@ InitLWLockAccess(void)
  * Caller needs to retrieve the requested number of LWLocks starting from
  * the base lock address returned by this API.  This can be used for
  * tranches that are requested by using RequestNamedLWLockTranche() API.
+ *
+ * The locks are already initialized.
+ *
+ * This function cannot be used for locks in builtin tranches or tranches
+ * registered with LWLockRegisterTranche(). There is no way to look those locks
+ * up by name.
  */
 LWLockPadded *
 GetNamedLWLockTranche(const char *tranche_name)
@@ -646,6 +662,14 @@ LWLockNewTrancheId(void)
  *
  * The tranche name will be user-visible as a wait event name, so try to
  * use a name that fits the style for those.
+ *
+ * The tranche ID should be a user-defined tranche ID acquired from
+ * LWLockNewTrancheId(). It is not necessary to call this for tranches
+ * allocated by RequestNamedLWLockTranche().
+ *
+ * The LWLock subsystem does not know where the LWLocks that will be assigned to
+ * this tranche are stored, or how many of them there are. The caller allocates
+ * suitable shared memory storage and initializes locks with LWLockInitialize().
  */
 void
 LWLockRegisterTranche(int tranche_id, const char *tranche_name)
@@ -698,6 +722,10 @@ LWLockRegisterTranche(int tranche_id, const char *tranche_name)
  *
  * The tranche name will be user-visible as a wait event name, so try to
  * use a name that fits the style for those.
+ *
+ * The LWLocks allocated here are retrieved after shmem startup using
+ * GetNamedLWLockTranche(). They are initialized during shared memory startup, so
+ * it is not necessary to call LWLockInitialize() on them.
  */
 void
 RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
@@ -738,10 +766,17 @@ RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
 
 /*
  * LWLockInitialize - initialize a new lwlock; it's initially unlocked
+ *
+ * For callers outside the LWLock subsystem itself, the tranche ID must either
+ * be a BuiltinTrancheIds entry for the calling subsystem or a tranche ID
+ * assigned with LWLockNewTrancheId().
  */
 void
 LWLockInitialize(LWLock *lock, int tranche_id)
 {
+	/* Re-initialization of individual LWLocks is not permitted */
+	Assert(tranche_id >= NUM_INDIVIDUAL_LWLOCKS || !IsUnderPostmaster);
+
 	pg_atomic_init_u32(&lock->state, LW_FLAG_RELEASE_OK);
 #ifdef LOCK_DEBUG
 	pg_atomic_init_u32(&lock->nwaiters, 0);
@@ -802,6 +837,11 @@ GetLWTrancheName(uint16 trancheId)
 
 /*
  * Return an identifier for an LWLock based on the wait class and event.
+ *
+ * Note that there's no way to identify an individual LWLock within a tranche by
+ * anything except its address. The LWLock subsystem doesn't know how many
+ * locks there are in all tranches and there's no requirement that they be
+ * stored in contiguous arrays.
  */
 const char *
 GetLWLockIdentifier(uint32 classId, uint16 eventId)
@@ -1009,7 +1049,7 @@ LWLockWakeup(LWLock *lock)
 
 	Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS);
 
-	/* unset required flags, and release lock, in one fell swoop */
+	/* unset required flags, and release waitlist lock, in one fell swoop */
 	{
 		uint32		old_state;
 		uint32		desired_state;
@@ -1836,6 +1876,9 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
 
 /*
  * LWLockRelease - release a previously acquired lock
+ *
+ * The actual lock acquire corresponding to this release happens in
+ * LWLockAttemptLock().
  */
 void
 LWLockRelease(LWLock *lock)
-- 
2.29.2
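
To illustrate the point in the comments above, that an individual LWLock can
only be identified by its address, here's a rough per-lock wait-time
aggregation using the probes from patch 1 (untested, same caveats as the
earlier sketches; lock addresses are only comparable across backends within
the limits described in patch 1's commit message).

# lwlock_wait_times.stp -- aggregate LWLock wait time per (tranche name, lock address)
global wait_start, wait_us

probe process("/usr/local/pgsql/bin/postgres").mark("lwlock__wait__start")
{
    wait_start[pid(), $arg3] = gettimeofday_us()
}

probe process("/usr/local/pgsql/bin/postgres").mark("lwlock__wait__done")
{
    t = wait_start[pid(), $arg3]
    if (t) {
        wait_us[user_string($arg1), $arg3] <<< gettimeofday_us() - t
        delete wait_start[pid(), $arg3]
    }
}

probe end
{
    foreach ([tranche, lock] in wait_us)
        printf("%s %p: waits=%d total=%dus\n", tranche, lock,
               @count(wait_us[tranche, lock]), @sum(wait_us[tranche, lock]))
}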

From af1b0ebcce96c2a81b4a7d88e71ac719477440a8 Mon Sep 17 00:00:00 2001
From: Craig Ringer <craig.rin...@2ndquadrant.com>
Date: Thu, 19 Nov 2020 18:05:39 +0800
Subject: [PATCH 2/4] Add to the tracepoints in LWLock routines

The existing tracepoints in lwlock.c didn't mark the start of LWLock
acquisition, so the full LWLock acquire cycle couldn't be timed without
relying on debuginfo. Since this can be quite relevant for production
performance issues, emit tracepoints at the start of LWLock acquisition.

Also add a tracepoint that's fired for all LWLock acquisitions at the
moment the shared memory state changes, whether done by LWLockAcquire
or LWLockConditionalAcquire. This lets tools reliably track which
backends hold which LWLocks even if we add new functions that acquire
LWLocks in future.

Add tracepoints in LWLockWaitForVar and LWLockUpdateVar so process
interaction around LWLock variable waits can be observed from trace
tooling. They can cause long waits and/or deadlocks, so it's worth
being able to time and track them.
---
 src/backend/storage/lmgr/lwlock.c | 24 ++++++++++++++++++++++++
 src/backend/utils/probes.d        |  8 ++++++++
 2 files changed, 32 insertions(+)

diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 5c8744d316..4c65a30c76 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -874,6 +874,9 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 				if (mode == LW_EXCLUSIVE)
 					lock->owner = MyProc;
 #endif
+				/* All LWLock acquires must hit this tracepoint */
+				TRACE_POSTGRESQL_LWLOCK_ACQUIRED(T_NAME(lock), mode, lock,
+						lock->tranche);
 				return false;
 			}
 			else
@@ -1237,6 +1240,9 @@ LWLockAcquire(LWLock *lock, LWLockMode mode)
 	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
 		elog(ERROR, "too many LWLocks taken");
 
+	TRACE_POSTGRESQL_LWLOCK_ACQUIRE_START(T_NAME(lock), mode, lock,
+			lock->tranche);
+
 	/*
 	 * Lock out cancel/die interrupts until we exit the code section protected
 	 * by the LWLock.  This ensures that interrupts will not interfere with
@@ -1389,6 +1395,9 @@ LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
 	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
 		elog(ERROR, "too many LWLocks taken");
 
+	TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_START(T_NAME(lock), mode, lock,
+			lock->tranche);
+
 	/*
 	 * Lock out cancel/die interrupts until we exit the code section protected
 	 * by the LWLock.  This ensures that interrupts will not interfere with
@@ -1453,6 +1462,9 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
 	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
 		elog(ERROR, "too many LWLocks taken");
 
+	TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_START(T_NAME(lock), mode, lock,
+			lock->tranche);
+
 	/*
 	 * Lock out cancel/die interrupts until we exit the code section protected
 	 * by the LWLock.  This ensures that interrupts will not interfere with
@@ -1635,6 +1647,9 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
 
 	PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
 
+	TRACE_POSTGRESQL_LWLOCK_WAITFORVAR_START(T_NAME(lock), lock,
+			lock->tranche, valptr, oldval, *valptr);
+
 	/*
 	 * Lock out cancel/die interrupts while we sleep on the lock.  There is no
 	 * cleanup mechanism to remove us from the wait queue if we got
@@ -1745,6 +1760,9 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
 	 */
 	RESUME_INTERRUPTS();
 
+	TRACE_POSTGRESQL_LWLOCK_WAITFORVAR_DONE(T_NAME(lock), lock, lock->tranche,
+			valptr, oldval, *newval, result);
+
 	return result;
 }
 
@@ -1767,6 +1785,9 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
 
 	PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
 
+	TRACE_POSTGRESQL_LWLOCK_UPDATEVAR_START(T_NAME(lock), lock, lock->tranche, valptr,
+			val);
+
 	proclist_init(&wakeup);
 
 	LWLockWaitListLock(lock);
@@ -1807,6 +1828,9 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
 		waiter->lwWaiting = false;
 		PGSemaphoreUnlock(waiter->sem);
 	}
+
+	TRACE_POSTGRESQL_LWLOCK_UPDATEVAR_DONE(T_NAME(lock), lock, lock->tranche,
+			valptr, val);
 }
 
 
diff --git a/src/backend/utils/probes.d b/src/backend/utils/probes.d
index 363839f85d..608a89af91 100644
--- a/src/backend/utils/probes.d
+++ b/src/backend/utils/probes.d
@@ -29,14 +29,22 @@ provider postgresql {
 	probe transaction__commit(LocalTransactionId);
 	probe transaction__abort(LocalTransactionId);
 
+	probe lwlock__acquired(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__acquire__start(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__acquire(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__release(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__wait__start(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__wait__done(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__condacquire__start(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__condacquire(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__condacquire__fail(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__acquire__or__wait__start(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__acquire__or__wait(const char *, LWLockMode, LWLock*, int);
 	probe lwlock__acquire__or__wait__fail(const char *, LWLockMode, LWLock*, int);
+	probe lwlock__waitforvar__start(const char *, LWLock*, int, uint64, uint64, uint64);
+	probe lwlock__waitforvar__done(const char *, LWLock*, int, uint64, uint64, uint64, bool);
+	probe lwlock__updatevar__start(const char *, LWLock*, int, uint64, uint64);
+	probe lwlock__updatevar__done(const char *, LWLock*, int, uint64, uint64);
 
 
 	probe lock__wait__start(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, LOCKMODE);
-- 
2.29.2
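
Finally, a sketch of the acquire-latency measurement this patch is meant to
enable, pairing lwlock__acquire__start with the new lwlock__acquired probe
(untested, same caveats as the earlier sketches; a fuller script would also
handle the condacquire and acquire-or-wait start probes, since
lwlock__acquired fires for those paths too).

# lwlock_acquire_latency.stp -- time from acquire start to shared memory state change
global acquire_start

probe process("/usr/local/pgsql/bin/postgres").mark("lwlock__acquire__start")
{
    acquire_start[pid(), $arg3] = gettimeofday_ns()
}

probe process("/usr/local/pgsql/bin/postgres").mark("lwlock__acquired")
{
    t = acquire_start[pid(), $arg3]
    if (t) {
        printf("pid %d acquired %s lock %p (mode %d) after %d ns\n",
               pid(), user_string($arg1), $arg3, $arg2, gettimeofday_ns() - t)
        delete acquire_start[pid(), $arg3]
    }
}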
