This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git

commit 32c515ed94aded92566ee727fad4ef860ea635d0
Author: liushengsong <[email protected]>
AuthorDate: Fri Mar 6 09:51:20 2026 +0800

    feat: add pg_stat_resqueues view for resource queue statistics
    
    Adds a new PGSTAT_KIND_RESQUEUE statistics kind that tracks per-queue
    throughput and latency counters, exposed through the pg_stat_resqueues
    view and pg_stat_get_resqueue_stats() SQL function.
    
    New statistics tracked per resource queue:
    - queries_submitted / admitted / rejected / completed
    - elapsed_wait_secs / max_wait_secs
    - elapsed_exec_secs / max_exec_secs
    - total_cost / total_memory_kb
    - stat_reset_timestamp
    
    Implementation notes:
    - All stat updates use pgstat_get_entry_ref_locked() to write directly
      to shared memory, bypassing the pending/flush mechanism.  This makes
      stats immediately visible without explicit flushes, and is safe when
      called from PG_CATCH blocks (pgstat_resqueue_rejected is invoked
      inside PG_CATCH in ResLockPortal/ResLockUtilityPortal).
    - Per-portal timing is tracked in a backend-local hash table keyed by
      portalid; entries are removed when the portal is rejected or
      completed (admission only updates the entry with the exec-start
      time; the entry is kept until execution finishes).
    
    Also fixes a pre-existing bug in resqueue.c: dclist_delete_from()
    already decrements waitProcs.count internally, but three call sites
    additionally did a manual count--, causing count to underflow to
    UINT32_MAX.  The next dclist_push_tail() would then overflow count
    back to 0 and trip the overflow assertion in ResProcSleep, crashing
    the backend.
    
    New files:
    - src/backend/utils/activity/pgstat_resqueue.c
    - src/test/isolation2/sql/resqueue_stats.sql
    - src/test/isolation2/expected/resqueue_stats.out
    
    Co-Authored-By: Claude Sonnet 4.6 <[email protected]>
---
 src/backend/catalog/system_views.sql               |  19 +
 src/backend/postmaster/autovacuum.c                |   7 +-
 src/backend/utils/activity/Makefile                |   1 +
 src/backend/utils/activity/pgstat.c                |  16 +
 src/backend/utils/activity/pgstat_resqueue.c       | 388 +++++++++++++++++++++
 src/backend/utils/adt/pgstatfuncs.c                |  78 +++++
 src/backend/utils/resscheduler/resqueue.c          |   9 +-
 src/backend/utils/resscheduler/resscheduler.c      |  64 +++-
 src/include/catalog/pg_proc.dat                    |   8 +
 src/include/pgstat.h                               |  74 +++-
 src/include/utils/pgstat_internal.h                |  31 ++
 .../expected/resource_queue_deadlock.out           |   8 +-
 .../expected/resource_queue_multi_portal.out       |   6 +-
 src/test/isolation2/expected/resqueue_stats.out    |  92 +++++
 src/test/isolation2/init_file_isolation2           |   4 +
 .../isolation2/sql/resource_queue_deadlock.sql     |   2 +
 src/test/isolation2/sql/resqueue_stats.sql         |  72 ++++
 17 files changed, 850 insertions(+), 29 deletions(-)

diff --git a/src/backend/catalog/system_views.sql 
b/src/backend/catalog/system_views.sql
index 505c817bc07..d4f0474cdc0 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -1224,6 +1224,25 @@ CREATE VIEW pg_resqueue_status AS
              queueholders int4)
             ON (s.queueid = q.oid);
 
+-- Resource queue cumulative statistics view
+CREATE VIEW pg_stat_resqueues AS
+    SELECT
+        q.oid                           AS queueid,
+        q.rsqname                       AS queuename,
+        s.queries_submitted,
+        s.queries_admitted,
+        s.queries_rejected,
+        s.queries_completed,
+        s.elapsed_wait_secs             AS total_wait_time_secs,
+        s.max_wait_secs,
+        s.elapsed_exec_secs             AS total_exec_time_secs,
+        s.max_exec_secs,
+        s.total_cost,
+        s.total_memory_kb,
+        s.stat_reset_timestamp
+    FROM pg_resqueue AS q,
+         pg_stat_get_resqueue_stats(q.oid) AS s;
+
 -- External table views
 
 CREATE VIEW pg_max_external_files AS
diff --git a/src/backend/postmaster/autovacuum.c 
b/src/backend/postmaster/autovacuum.c
index 7372a51a373..f517cb4d006 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -676,16 +676,17 @@ AutoVacLauncherMain(int argc, char *argv[])
  * the database chosen is connectable, the launcher will never select it and 
the
  * worker will continue to signal for a new launcher.
  */
-#if 0
        /*
         * Even when system is configured to use a different fetch consistency,
         * for autovac we always want fresh stats.
         */
        SetConfigOption("stats_fetch_consistency", "none", PGC_SUSET, 
PGC_S_OVERRIDE);
 
+#if 0
        /*
-        * In emergency mode, just start a worker (unless shutdown was 
requested)
-        * and go away.
+        * In GPDB, we only want an autovacuum worker to start once we know
+        * there is a database to vacuum. Therefore, we never want emergency 
mode
+        * to start a worker immediately.
         */
        if (!AutoVacuumingActive())
        {
diff --git a/src/backend/utils/activity/Makefile 
b/src/backend/utils/activity/Makefile
index 7d7482dde02..d7d7d6c6b0d 100644
--- a/src/backend/utils/activity/Makefile
+++ b/src/backend/utils/activity/Makefile
@@ -25,6 +25,7 @@ OBJS = \
        pgstat_io.o \
        pgstat_relation.o \
        pgstat_replslot.o \
+       pgstat_resqueue.o \
        pgstat_shmem.o \
        pgstat_slru.o \
        pgstat_subscription.o \
diff --git a/src/backend/utils/activity/pgstat.c 
b/src/backend/utils/activity/pgstat.c
index 20c3f3bbb40..ab7768154c2 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -337,6 +337,22 @@ static const PgStat_KindInfo 
pgstat_kind_infos[PGSTAT_NUM_KINDS] = {
                .reset_timestamp_cb = pgstat_subscription_reset_timestamp_cb,
        },
 
+       [PGSTAT_KIND_RESQUEUE] = {
+               .name = "resqueue",
+
+               .fixed_amount = false,
+               /* resource queues are cluster-wide objects, visible across 
databases */
+               .accessed_across_databases = true,
+
+               .shared_size = sizeof(PgStatShared_ResQueue),
+               .shared_data_off = offsetof(PgStatShared_ResQueue, stats),
+               .shared_data_len = sizeof(((PgStatShared_ResQueue *) 0)->stats),
+               .pending_size = sizeof(PgStat_ResQueueCounts),
+
+               .flush_pending_cb = pgstat_resqueue_flush_cb,
+               .reset_timestamp_cb = pgstat_resqueue_reset_timestamp_cb,
+       },
+
 
        /* stats for fixed-numbered (mostly 1) objects */
 
diff --git a/src/backend/utils/activity/pgstat_resqueue.c 
b/src/backend/utils/activity/pgstat_resqueue.c
new file mode 100644
index 00000000000..0ca7f3a94f6
--- /dev/null
+++ b/src/backend/utils/activity/pgstat_resqueue.c
@@ -0,0 +1,388 @@
+/* -------------------------------------------------------------------------
+ *
+ * pgstat_resqueue.c
+ *       Implementation of resource queue statistics.
+ *
+ * Each backend maintains a hash table (keyed by portalid) of
+ * PgStat_ResQueuePortalEntry structs for in-flight portals.  When a portal
+ * finishes (admitted, rejected, or completed), its timing deltas are
+ * accumulated into per-queue PgStat_ResQueueCounts pending data, which is
+ * eventually flushed into the shared-memory PgStatShared_ResQueue entry by
+ * pgstat_report_stat().
+ *
+ * Time is tracked at second granularity (via time()) to keep overhead low.
+ *
+ * Portions Copyright (c) 2006-2010, Greenplum inc.
+ * Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *       src/backend/utils/activity/pgstat_resqueue.c
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include <time.h>
+
+#include "pgstat.h"
+#include "utils/hsearch.h"
+#include "utils/memutils.h"
+#include "utils/pgstat_internal.h"
+
+
+/* ----------
+ * Backend-local hash table of in-flight portal entries.
+ * Keyed by portalid (uint32).
+ * ----------
+ */
+static HTAB *pgStatResQueuePortalHash = NULL;
+
+
+/* ----------
+ * pgstat_resqueue_portal_hash_init
+ *
+ * Lazily initialise the backend-local portal tracking hash.
+ * ----------
+ */
+static void
+pgstat_resqueue_portal_hash_init(void)
+{
+       HASHCTL         ctl;
+
+       if (pgStatResQueuePortalHash != NULL)
+               return;
+
+       MemSet(&ctl, 0, sizeof(ctl));
+       ctl.keysize = sizeof(uint32);
+       ctl.entrysize = sizeof(PgStat_ResQueuePortalEntry);
+       ctl.hcxt = TopMemoryContext;
+
+       pgStatResQueuePortalHash = hash_create("ResQueue portal stats",
+                                                                               
   16,
+                                                                               
   &ctl,
+                                                                               
   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+}
+
+/* ----------
+ * pgstat_resqueue_wait_start
+ *
+ * Called just before ResLockAcquire() when a portal is about to enter the
+ * resource queue.  Records the wait-start timestamp and resource parameters,
+ * and immediately writes the submission counter to shared stats.
+ *
+ * We write directly to shared stats (bypassing the pending mechanism) so
+ * that stats are immediately visible to other sessions without requiring an
+ * explicit flush.  This also keeps the code safe regardless of the calling
+ * context.
+ * ----------
+ */
+void
+pgstat_resqueue_wait_start(uint32 portalid, Oid queueid,
+                                                  Cost query_cost, int64 
query_memory_kb)
+{
+       PgStat_ResQueuePortalEntry *entry;
+       bool            found;
+       PgStat_EntryRef *entry_ref;
+       PgStatShared_ResQueue *shqent;
+
+       /* Skip stat update if pgstat shared memory is already detached. */
+       if (pgStatLocal.shared_hash == NULL)
+               return;
+
+       pgstat_resqueue_portal_hash_init();
+
+       entry = (PgStat_ResQueuePortalEntry *)
+               hash_search(pgStatResQueuePortalHash, &portalid, HASH_ENTER, 
&found);
+
+       /* If a stale entry exists (e.g. from a prior run), overwrite it. */
+       entry->portalid = portalid;
+       entry->queueid = queueid;
+       entry->t_wait_start = time(NULL);
+       entry->t_exec_start = 0;
+       entry->query_cost = query_cost;
+       entry->query_memory_kb = query_memory_kb;
+
+       /* Write submission counters directly to shared stats. */
+       entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_RESQUEUE,
+                                                                               
        InvalidOid, queueid, false);
+       if (entry_ref == NULL)
+               return;
+
+       shqent = (PgStatShared_ResQueue *) entry_ref->shared_stats;
+       shqent->stats.queries_submitted++;
+       shqent->stats.total_cost += (PgStat_Counter) query_cost;
+       shqent->stats.total_memory_kb += query_memory_kb;
+       pgstat_unlock_entry(entry_ref);
+}
+
+/* ----------
+ * pgstat_resqueue_wait_end
+ *
+ * Called after ResLockAcquire() returns successfully (portal admitted).
+ * Records the exec-start timestamp and counts the admission directly in
+ * shared stats.
+ * ----------
+ */
+void
+pgstat_resqueue_wait_end(uint32 portalid)
+{
+       PgStat_ResQueuePortalEntry *entry;
+       bool            found;
+       time_t          now;
+       time_t          wait_secs;
+       PgStat_EntryRef *entry_ref;
+       PgStatShared_ResQueue *shqent;
+
+       if (pgStatResQueuePortalHash == NULL)
+               return;
+
+       entry = (PgStat_ResQueuePortalEntry *)
+               hash_search(pgStatResQueuePortalHash, &portalid, HASH_FIND, 
&found);
+
+       if (!found)
+               return;
+
+       now = time(NULL);
+       entry->t_exec_start = now;
+
+       wait_secs = (entry->t_wait_start > 0) ? (now - entry->t_wait_start) : 0;
+       if (wait_secs < 0)
+               wait_secs = 0;
+
+       entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_RESQUEUE,
+                                                                               
        InvalidOid, entry->queueid, false);
+       if (entry_ref == NULL)
+               return;
+
+       shqent = (PgStatShared_ResQueue *) entry_ref->shared_stats;
+       shqent->stats.queries_admitted++;
+       shqent->stats.elapsed_wait_secs += (PgStat_Counter) wait_secs;
+       if ((PgStat_Counter) wait_secs > shqent->stats.max_wait_secs)
+               shqent->stats.max_wait_secs = (PgStat_Counter) wait_secs;
+       pgstat_unlock_entry(entry_ref);
+}
+
+/* ----------
+ * pgstat_resqueue_rejected
+ *
+ * Called when ResLockAcquire() raises an error (portal cancelled or error
+ * while waiting), or when the portal is bypassed (cost below threshold).
+ * Removes the portal entry without counting exec time.
+ *
+ * IMPORTANT: This function may be called from inside a PG_CATCH block.
+ * It must NOT call pgstat_prep_pending_entry(), which modifies the global
+ * pgStatPending dlist and allocates memory that may be unsafe to use during
+ * error recovery.  Instead, we update shared stats directly via
+ * pgstat_get_entry_ref_locked(), which is PG_CATCH-safe because it only
+ * allocates from TopMemoryContext derivatives and uses LWLock operations.
+ * ----------
+ */
+void
+pgstat_resqueue_rejected(uint32 portalid)
+{
+       PgStat_ResQueuePortalEntry *entry;
+       bool            found;
+       time_t          now;
+       time_t          wait_secs;
+       Oid                     queueid;
+       PgStat_EntryRef *entry_ref;
+       PgStatShared_ResQueue *shqent;
+
+       if (pgStatResQueuePortalHash == NULL)
+               return;
+
+       entry = (PgStat_ResQueuePortalEntry *)
+               hash_search(pgStatResQueuePortalHash, &portalid, HASH_FIND, 
&found);
+
+       if (!found)
+               return;
+
+       now = time(NULL);
+       wait_secs = (entry->t_wait_start > 0) ? (now - entry->t_wait_start) : 0;
+       if (wait_secs < 0)
+               wait_secs = 0;
+
+       queueid = entry->queueid;
+
+       /* Remove portal entry first — hash_search(HASH_REMOVE) is 
PG_CATCH-safe. */
+       hash_search(pgStatResQueuePortalHash, &portalid, HASH_REMOVE, NULL);
+
+       /* Skip stat update if pgstat shared memory is already detached. */
+       if (pgStatLocal.shared_hash == NULL)
+               return;
+
+       /*
+        * Update the shared stats entry directly, bypassing the pending
+        * mechanism.  pgstat_get_entry_ref_locked allocates only from
+        * TopMemoryContext derivatives and takes an LWLock, both of which are
+        * safe during error recovery.
+        */
+       entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_RESQUEUE,
+                                                                               
        InvalidOid, queueid, false);
+       if (entry_ref == NULL)
+               return;
+
+       shqent = (PgStatShared_ResQueue *) entry_ref->shared_stats;
+       shqent->stats.queries_rejected++;
+       shqent->stats.elapsed_wait_secs += (PgStat_Counter) wait_secs;
+       if ((PgStat_Counter) wait_secs > shqent->stats.max_wait_secs)
+               shqent->stats.max_wait_secs = (PgStat_Counter) wait_secs;
+
+       pgstat_unlock_entry(entry_ref);
+}
+
+/* ----------
+ * pgstat_resqueue_exec_end
+ *
+ * Called from ResUnLockPortal() when a portal finishes execution (normal
+ * completion, error, or cancel after admission).  Writes completion counters
+ * directly to shared stats.
+ * ----------
+ */
+void
+pgstat_resqueue_exec_end(uint32 portalid)
+{
+       PgStat_ResQueuePortalEntry *entry;
+       bool            found;
+       time_t          now;
+       time_t          exec_secs;
+       PgStat_EntryRef *entry_ref;
+       PgStatShared_ResQueue *shqent;
+
+       if (pgStatResQueuePortalHash == NULL)
+               return;
+
+       /*
+        * pgstat_shutdown_hook (before_shmem_exit) runs before ProcKill
+        * (on_shmem_exit).  If AtExitCleanup_ResPortals calls us after pgstat
+        * has detached from shared memory, skip the stat update but still clean
+        * up the local hash entry to avoid a memory leak.
+        */
+       if (pgStatLocal.shared_hash == NULL)
+       {
+               hash_search(pgStatResQueuePortalHash, &portalid, HASH_REMOVE, 
NULL);
+               return;
+       }
+
+       entry = (PgStat_ResQueuePortalEntry *)
+               hash_search(pgStatResQueuePortalHash, &portalid, HASH_FIND, 
&found);
+
+       if (!found)
+               return;
+
+       now = time(NULL);
+       exec_secs = (entry->t_exec_start > 0) ? (now - entry->t_exec_start) : 0;
+       if (exec_secs < 0)
+               exec_secs = 0;
+
+       entry_ref = pgstat_get_entry_ref_locked(PGSTAT_KIND_RESQUEUE,
+                                                                               
        InvalidOid, entry->queueid, false);
+       if (entry_ref != NULL)
+       {
+               shqent = (PgStatShared_ResQueue *) entry_ref->shared_stats;
+               shqent->stats.queries_completed++;
+               shqent->stats.elapsed_exec_secs += (PgStat_Counter) exec_secs;
+               if ((PgStat_Counter) exec_secs > shqent->stats.max_exec_secs)
+                       shqent->stats.max_exec_secs = (PgStat_Counter) 
exec_secs;
+               pgstat_unlock_entry(entry_ref);
+       }
+
+       hash_search(pgStatResQueuePortalHash, &portalid, HASH_REMOVE, NULL);
+}
+
+/* ----------
+ * pgstat_create_resqueue
+ *
+ * Called when a resource queue is created via DDL.  Ensures a stats entry
+ * exists and is initialised.
+ * ----------
+ */
+void
+pgstat_create_resqueue(Oid queueid)
+{
+       pgstat_create_transactional(PGSTAT_KIND_RESQUEUE, InvalidOid, queueid);
+       pgstat_get_entry_ref(PGSTAT_KIND_RESQUEUE, InvalidOid, queueid, true, 
NULL);
+       pgstat_reset_entry(PGSTAT_KIND_RESQUEUE, InvalidOid, queueid, 0);
+}
+
+/* ----------
+ * pgstat_drop_resqueue
+ *
+ * Called when a resource queue is dropped via DDL.
+ * ----------
+ */
+void
+pgstat_drop_resqueue(Oid queueid)
+{
+       pgstat_drop_transactional(PGSTAT_KIND_RESQUEUE, InvalidOid, queueid);
+}
+
+/* ----------
+ * pgstat_fetch_stat_resqueue
+ *
+ * Return a palloc'd snapshot of statistics for the given resource queue OID,
+ * or NULL if no stats entry exists.
+ * ----------
+ */
+PgStat_StatResQueueEntry *
+pgstat_fetch_stat_resqueue(Oid queueid)
+{
+       return (PgStat_StatResQueueEntry *)
+               pgstat_fetch_entry(PGSTAT_KIND_RESQUEUE, InvalidOid, queueid);
+}
+
+/* ----------
+ * pgstat_resqueue_flush_cb
+ *
+ * Flush pending per-queue delta counters into shared memory.
+ * Called by pgstat_report_stat() for each entry with pending data.
+ *
+ * max_wait_secs and max_exec_secs are merged with MAX rather than addition.
+ * ----------
+ */
+bool
+pgstat_resqueue_flush_cb(PgStat_EntryRef *entry_ref, bool nowait)
+{
+       PgStat_ResQueueCounts *localent;
+       PgStatShared_ResQueue *shqent;
+
+       localent = (PgStat_ResQueueCounts *) entry_ref->pending;
+       shqent = (PgStatShared_ResQueue *) entry_ref->shared_stats;
+
+       if (!pgstat_lock_entry(entry_ref, nowait))
+               return false;
+
+#define RESQUEUE_ACC(fld)      shqent->stats.fld += localent->fld
+       RESQUEUE_ACC(queries_submitted);
+       RESQUEUE_ACC(queries_admitted);
+       RESQUEUE_ACC(queries_rejected);
+       RESQUEUE_ACC(queries_completed);
+       RESQUEUE_ACC(elapsed_wait_secs);
+       RESQUEUE_ACC(elapsed_exec_secs);
+       RESQUEUE_ACC(total_cost);
+       RESQUEUE_ACC(total_memory_kb);
+#undef RESQUEUE_ACC
+
+       /* max fields: merge with MAX */
+       if (localent->max_wait_secs > shqent->stats.max_wait_secs)
+               shqent->stats.max_wait_secs = localent->max_wait_secs;
+       if (localent->max_exec_secs > shqent->stats.max_exec_secs)
+               shqent->stats.max_exec_secs = localent->max_exec_secs;
+
+       pgstat_unlock_entry(entry_ref);
+
+       return true;
+}
+
+/* ----------
+ * pgstat_resqueue_reset_timestamp_cb
+ *
+ * Reset the stat_reset_timestamp in the shared entry.
+ * ----------
+ */
+void
+pgstat_resqueue_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts)
+{
+       ((PgStatShared_ResQueue *) header)->stats.stat_reset_timestamp = ts;
+}
diff --git a/src/backend/utils/adt/pgstatfuncs.c 
b/src/backend/utils/adt/pgstatfuncs.c
index aeffe94a39e..61c69b3ca4e 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -2116,6 +2116,84 @@ pg_stat_get_subscription_stats(PG_FUNCTION_ARGS)
        PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, 
nulls)));
 }
 
+/*
+ * pg_stat_get_resqueue_stats
+ *
+ * Returns cumulative statistics for one resource queue as a composite row.
+ * Returns all-zeros if no stats entry exists for the given queue OID.
+ */
+Datum
+pg_stat_get_resqueue_stats(PG_FUNCTION_ARGS)
+{
+#define PG_STAT_GET_RESQUEUE_STATS_COLS        13
+       Oid                     queueid = PG_GETARG_OID(0);
+       TupleDesc       tupdesc;
+       Datum           values[PG_STAT_GET_RESQUEUE_STATS_COLS] = {0};
+       bool            nulls[PG_STAT_GET_RESQUEUE_STATS_COLS] = {0};
+       PgStat_StatResQueueEntry *entry;
+       PgStat_StatResQueueEntry allzero;
+
+       /* Fetch stats; fall back to all-zero if queue has no stats yet */
+       entry = pgstat_fetch_stat_resqueue(queueid);
+       if (!entry)
+       {
+               memset(&allzero, 0, sizeof(PgStat_StatResQueueEntry));
+               entry = &allzero;
+       }
+
+       tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_RESQUEUE_STATS_COLS);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 1,  "queueid",
+                                          OIDOID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 2,  "queries_submitted",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 3,  "queries_admitted",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 4,  "queries_rejected",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 5,  "queries_completed",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 6,  "elapsed_wait_secs",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 7,  "max_wait_secs",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 8,  "elapsed_exec_secs",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 9,  "max_exec_secs",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 10, "total_cost",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 11, "total_memory_kb",
+                                          INT8OID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 12, "stat_reset_timestamp",
+                                          TIMESTAMPTZOID, -1, 0);
+       TupleDescInitEntry(tupdesc, (AttrNumber) 13, "have_stats",
+                                          BOOLOID, -1, 0);
+       BlessTupleDesc(tupdesc);
+
+       values[0]  = ObjectIdGetDatum(queueid);
+       values[1]  = Int64GetDatum(entry->queries_submitted);
+       values[2]  = Int64GetDatum(entry->queries_admitted);
+       values[3]  = Int64GetDatum(entry->queries_rejected);
+       values[4]  = Int64GetDatum(entry->queries_completed);
+       values[5]  = Int64GetDatum(entry->elapsed_wait_secs);
+       values[6]  = Int64GetDatum(entry->max_wait_secs);
+       values[7]  = Int64GetDatum(entry->elapsed_exec_secs);
+       values[8]  = Int64GetDatum(entry->max_exec_secs);
+       values[9]  = Int64GetDatum(entry->total_cost);
+       values[10] = Int64GetDatum(entry->total_memory_kb);
+
+       if (entry->stat_reset_timestamp == 0)
+               nulls[11] = true;
+       else
+               values[11] = TimestampTzGetDatum(entry->stat_reset_timestamp);
+
+       /* have_stats: true when an actual stats entry was found */
+       values[12] = BoolGetDatum(pgstat_have_entry(PGSTAT_KIND_RESQUEUE,
+                                                                               
                 InvalidOid, queueid));
+
+       PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, 
nulls)));
+}
+
 /*
  * Checks for presence of stats for object with provided kind, database oid,
  * object oid.
diff --git a/src/backend/utils/resscheduler/resqueue.c 
b/src/backend/utils/resscheduler/resqueue.c
index 3af903f089a..5cb7d9bffa1 100644
--- a/src/backend/utils/resscheduler/resqueue.c
+++ b/src/backend/utils/resscheduler/resqueue.c
@@ -1367,8 +1367,7 @@ ResProcLockRemoveSelfAndWakeup(LOCK *lock)
 
                        nextproc = (PGPROC *) proc->links.next;
 
-                       dclist_delete_from(waitQueue, &(proc->links));
-                       (proc->waitLock->waitProcs.count)--;
+                       dclist_delete_from_thoroughly(waitQueue, 
&(proc->links));
 
                        proc = nextproc;
 
@@ -1472,8 +1471,7 @@ ResProcWakeup(PGPROC *proc, int waitStatus)
        retProc = (PGPROC *) proc->links.next;
 
        /* Remove process from wait queue */
-       dclist_delete_from(&proc->waitLock->waitProcs, &(proc->links));
-       (proc->waitLock->waitProcs.count)--;
+       dclist_delete_from_thoroughly(&proc->waitLock->waitProcs, 
&(proc->links));
 
        /* Clean up process' state and pass it the ok/fail signal */
        proc->waitLock = NULL;
@@ -1511,8 +1509,7 @@ ResRemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
        Assert(waitLock->waitProcs.count > 0);
 
        /* Remove proc from lock's wait queue */
-       dclist_delete_from(&waitLock->waitProcs, &(proc->links));
-       waitLock->waitProcs.count--;
+       dclist_delete_from_thoroughly(&waitLock->waitProcs, &(proc->links));
 
        /* Undo increments of request counts by waiting process */
        Assert(waitLock->nRequested > 0);
diff --git a/src/backend/utils/resscheduler/resscheduler.c 
b/src/backend/utils/resscheduler/resscheduler.c
index 375982b3015..5a09d5541eb 100644
--- a/src/backend/utils/resscheduler/resscheduler.c
+++ b/src/backend/utils/resscheduler/resscheduler.c
@@ -677,28 +677,35 @@ ResLockPortal(Portal portal, QueryDesc *qDesc)
                if (takeLock)
                {
 #ifdef RESLOCK_DEBUG
-                       elog(DEBUG1, "acquire resource lock for queue %u 
(portal %u)", 
+                       elog(DEBUG1, "acquire resource lock for queue %u 
(portal %u)",
                                        queueid, portal->portalId);
 #endif
                        SET_LOCKTAG_RESOURCE_QUEUE(tag, queueid);
 
+                       /* Record wait-start and submission for pgstat 
tracking. */
+                       pgstat_resqueue_wait_start(portal->portalId, queueid,
+                                                                          
incData.increments[RES_COST_LIMIT],
+                                                                          
(int64) (incData.increments[RES_MEMORY_LIMIT] / 1024));
+
                        PG_TRY();
                        {
                                lockResult = ResLockAcquire(&tag, &incData);
                        }
                        PG_CATCH();
                        {
-                               /* 
-                                * We might have been waiting for a resource 
queue lock when we get 
-                                * here. Calling ResLockRelease without calling 
ResLockWaitCancel will 
+                               /*
+                                * We might have been waiting for a resource 
queue lock when we get
+                                * here. Calling ResLockRelease without calling 
ResLockWaitCancel will
                                 * cause the locallock to be cleaned up, but 
will leave the global
-                                * variable lockAwaited still pointing to the 
locallock hash 
+                                * variable lockAwaited still pointing to the 
locallock hash
                                 * entry.
                                 */
                                ResLockWaitCancel();
-               
 
-                               /* If we had acquired the resource queue lock, 
release it and clean up */       
+                               /* Count this portal as rejected in pgstat. */
+                               pgstat_resqueue_rejected(portal->portalId);
+
+                               /* If we had acquired the resource queue lock, 
release it and clean up */
                                if (!ResLockRelease(&tag, portal->portalId))
                                {
                                        ereport(LOG,
@@ -709,7 +716,7 @@ ResLockPortal(Portal portal, QueryDesc *qDesc)
                                                                           
tag.locktag_field1, portal->portalId,
                                                                           
portal->name, portal->sourceText)));
                                }
-                       
+
                                /* GPDB hook for collecting query info */
                                if (query_info_collect_hook)
                                        
(*query_info_collect_hook)(METRICS_QUERY_ERROR, qDesc);
@@ -721,17 +728,24 @@ ResLockPortal(Portal portal, QueryDesc *qDesc)
                        }
                        PG_END_TRY();
 
-                       /* 
+                       /*
                         * See if query was too small to bother locking at all, 
i.e had
                         * cost smaller than the ignore cost threshold for the 
queue.
                         */
                        if (lockResult == LOCKACQUIRE_NOT_AVAIL)
                        {
 #ifdef RESLOCK_DEBUG
-                               elog(DEBUG1, "cancel resource lock for queue %u 
(portal %u)", 
+                               elog(DEBUG1, "cancel resource lock for queue %u 
(portal %u)",
                                                queueid, portal->portalId);
 #endif
-                               /* 
+                               /*
+                                * Query cost was below the ignore threshold; 
the portal won't
+                                * consume a queue slot.  Remove the pgstat 
portal entry we
+                                * created above without counting it as 
admitted.
+                                */
+                               pgstat_resqueue_rejected(portal->portalId);
+
+                               /*
                                 * Reset portalId and queueid for this portal 
so the queue
                                 * and increment accounting tests continue to 
work properly.
                                 */
@@ -739,6 +753,11 @@ ResLockPortal(Portal portal, QueryDesc *qDesc)
                                portal->portalId = INVALID_PORTALID;
                                shouldReleaseLock = false;
                        }
+                       else
+                       {
+                               /* Portal was admitted into the queue; record 
exec-start time. */
+                               pgstat_resqueue_wait_end(portal->portalId);
+                       }
 
                        /* Count holdable cursors (if we are locking this one) 
.*/
                        if (portal->cursorOptions & CURSOR_OPT_HOLD && 
shouldReleaseLock)
@@ -789,6 +808,11 @@ ResLockUtilityPortal(Portal portal, float4 ignoreCostLimit)
 #endif
                SET_LOCKTAG_RESOURCE_QUEUE(tag, queueid);
 
+               /* Record wait-start for utility statement. */
+               pgstat_resqueue_wait_start(portal->portalId, queueid,
+                                                                  (Cost) 
incData.increments[RES_COST_LIMIT],
+                                                                  0 /* no 
memory tracking for utility stmts */);
+
                PG_TRY();
                {
                        lockResult = ResLockAcquire(&tag, &incData);
@@ -804,6 +828,9 @@ ResLockUtilityPortal(Portal portal, float4 ignoreCostLimit)
                         */
                        ResLockWaitCancel();
 
+                       /* Count this portal as rejected in pgstat. */
+                       pgstat_resqueue_rejected(portal->portalId);
+
                        /* If we had acquired the resource queue lock, release 
it and clean up */
                        if (!ResLockRelease(&tag, portal->portalId))
                        {
@@ -826,8 +853,13 @@ ResLockUtilityPortal(Portal portal, float4 ignoreCostLimit)
                        PG_RE_THROW();
                }
                PG_END_TRY();
+
+               if (lockResult != LOCKACQUIRE_NOT_AVAIL)
+                       pgstat_resqueue_wait_end(portal->portalId);
+               else
+                       pgstat_resqueue_rejected(portal->portalId);
        }
-       
+
        portal->hasResQueueLock = shouldReleaseLock;
 }
 
@@ -842,15 +874,19 @@ ResUnLockPortal(Portal portal)
 
        queueid = portal->queueId;
 
-       /* 
+       /*
         * Check we have a valid queue before going any further.
         */
        if (queueid != InvalidOid)
        {
 #ifdef RESLOCK_DEBUG
-               elog(DEBUG1, "release resource lock for queue %u (portal %u)", 
+               elog(DEBUG1, "release resource lock for queue %u (portal %u)",
                         queueid, portal->portalId);
 #endif
+
+               /* Record execution completion in pgstat. */
+               pgstat_resqueue_exec_end(portal->portalId);
+
                SET_LOCKTAG_RESOURCE_QUEUE(tag, queueid);
 
                if (!ResLockRelease(&tag, portal->portalId))
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 3b1f3a7e327..f788bd349e4 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -13005,3 +13005,11 @@
 { oid => 8693, descr => 'deparse SCHEDULE clause for a given dynamic table',
   proname => 'pg_get_dynamic_table_schedule', provolatile => 's', prorettype 
=> 'text',
   proargtypes => 'oid', prosrc => 'pg_get_dynamic_table_schedule' },
+
+{ oid => 9200, descr => 'statistics: cumulative statistics for a resource queue',
+  proname => 'pg_stat_get_resqueue_stats', provolatile => 's',
+  proparallel => 'r', prorettype => 'record', proargtypes => 'oid',
+  proallargtypes => 
'{oid,oid,int8,int8,int8,int8,int8,int8,int8,int8,int8,int8,timestamptz,bool}',
+  proargmodes => '{i,o,o,o,o,o,o,o,o,o,o,o,o,o}',
+  proargnames => 
'{queueid,queueid,queries_submitted,queries_admitted,queries_rejected,queries_completed,elapsed_wait_secs,max_wait_secs,elapsed_exec_secs,max_exec_secs,total_cost,total_memory_kb,stat_reset_timestamp,have_stats}',
+  prosrc => 'pg_stat_get_resqueue_stats' },
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 338f5ae9562..c4f3b88444a 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -45,6 +45,7 @@ typedef enum PgStat_Kind
        PGSTAT_KIND_FUNCTION,           /* per-function statistics */
        PGSTAT_KIND_REPLSLOT,           /* per-slot statistics */
        PGSTAT_KIND_SUBSCRIPTION,       /* per-subscription statistics */
+       PGSTAT_KIND_RESQUEUE,           /* per-resource-queue statistics */
 
        /* stats for fixed-numbered objects */
        PGSTAT_KIND_ARCHIVER,
@@ -237,7 +238,7 @@ typedef struct PgStat_TableXactStatus
  * ------------------------------------------------------------
  */
 
-#define PGSTAT_FILE_FORMAT_ID  0x01A5BCAC
+#define PGSTAT_FILE_FORMAT_ID  0x01A5BCAD
 
 typedef struct PgStat_ArchiverStats
 {
@@ -457,6 +458,63 @@ typedef struct PgStat_PendingWalStats
 } PgStat_PendingWalStats;
 
 
+/* ----------
+ * PgStat_StatResQueueEntry
+ *
+ * Per-resource-queue cumulative statistics, stored in shared memory and
+ * persisted to disk. Exposed via the pg_stat_resqueues view.
+ *
+ * Time values are in seconds (matching time() granularity used during
+ * portal tracking). max_wait_secs and max_exec_secs are historical peaks.
+ * ----------
+ */
+typedef struct PgStat_StatResQueueEntry
+{
+       /* throughput counters */
+       PgStat_Counter queries_submitted;       /* total queries entered the 
queue */
+       PgStat_Counter queries_admitted;        /* total queries admitted from 
queue */
+       PgStat_Counter queries_rejected;        /* queries cancelled/errored 
while waiting */
+       PgStat_Counter queries_completed;       /* queries finished execution */
+
+       /* wait time (seconds) */
+       PgStat_Counter elapsed_wait_secs;       /* cumulative wait seconds */
+       PgStat_Counter max_wait_secs;           /* peak single-query wait time 
*/
+
+       /* exec time (seconds) */
+       PgStat_Counter elapsed_exec_secs;       /* cumulative exec seconds */
+       PgStat_Counter max_exec_secs;           /* peak single-query exec time 
*/
+
+       /* resource usage */
+       PgStat_Counter total_cost;                      /* cumulative planner 
cost estimate */
+       PgStat_Counter total_memory_kb;         /* cumulative memory granted 
(KB) */
+
+       TimestampTz stat_reset_timestamp;
+} PgStat_StatResQueueEntry;
+
+/* ----------
+ * PgStat_ResQueueCounts
+ *
+ * Per-resource-queue delta counters accumulated by a single backend, to be
+ * flushed into PgStat_StatResQueueEntry by pgstat_resqueue_flush_cb().
+ *
+ * NOTE(review): the update paths added in this patch write directly to
+ * shared memory via pgstat_get_entry_ref_locked() and bypass the pending
+ * mechanism, so this pending path is normally unused; keep this comment
+ * and the code in sync if that changes.
+ *
+ * This struct must contain only delta counters so that memcmp against zeroes
+ * reliably detects whether there are pending updates.
+ * ----------
+ */
+typedef struct PgStat_ResQueueCounts
+{
+       PgStat_Counter queries_submitted;
+       PgStat_Counter queries_admitted;
+       PgStat_Counter queries_rejected;
+       PgStat_Counter queries_completed;
+       PgStat_Counter elapsed_wait_secs;
+       PgStat_Counter elapsed_exec_secs;
+       PgStat_Counter max_wait_secs;           /* max in this flush batch */
+       PgStat_Counter max_exec_secs;           /* max in this flush batch */
+       PgStat_Counter total_cost;
+       PgStat_Counter total_memory_kb;
+} PgStat_ResQueueCounts;
 
 
 /*
@@ -702,6 +760,20 @@ extern void pgstat_drop_subscription(Oid subid);
 extern PgStat_StatSubEntry *pgstat_fetch_stat_subscription(Oid subid);
 
 
+/*
+ * Functions in pgstat_resqueue.c
+ */
+
+extern void pgstat_resqueue_wait_start(uint32 portalid, Oid queueid,
+                                                                          Cost 
query_cost, int64 query_memory_kb);
+extern void pgstat_resqueue_wait_end(uint32 portalid);
+extern void pgstat_resqueue_rejected(uint32 portalid);
+extern void pgstat_resqueue_exec_end(uint32 portalid);
+extern void pgstat_create_resqueue(Oid queueid);
+extern void pgstat_drop_resqueue(Oid queueid);
+extern PgStat_StatResQueueEntry *pgstat_fetch_stat_resqueue(Oid queueid);
+
+
 /*
  * Functions in pgstat_xact.c
  */
diff --git a/src/include/utils/pgstat_internal.h 
b/src/include/utils/pgstat_internal.h
index f886ab7f4bc..2b3da610a31 100644
--- a/src/include/utils/pgstat_internal.h
+++ b/src/include/utils/pgstat_internal.h
@@ -413,6 +413,37 @@ typedef struct PgStatShared_ReplSlot
        PgStat_StatReplSlotEntry stats;
 } PgStatShared_ReplSlot;
 
+typedef struct PgStatShared_ResQueue
+{
+       PgStatShared_Common header;
+       PgStat_StatResQueueEntry stats;
+} PgStatShared_ResQueue;
+
+/* ----------
+ * PgStat_ResQueuePortalEntry
+ *
+ * Backend-local tracking entry for a single portal subject to resource queue
+ * scheduling. Records timestamps and resource info for a portal's lifetime,
+ * then rolls them into per-queue PgStat_ResQueueCounts when the portal exits.
+ *
+ * The collector never sees this struct.
+ * ----------
+ */
+typedef struct PgStat_ResQueuePortalEntry
+{
+       uint32          portalid;                       /* hash key */
+       Oid                     queueid;                        /* resource 
queue this portal belongs to */
+       time_t          t_wait_start;           /* time portal started waiting 
in queue */
+       time_t          t_exec_start;           /* time portal was admitted & 
began executing */
+       Cost            query_cost;                     /* planner cost 
estimate */
+       int64           query_memory_kb;        /* memory granted (KB) */
+} PgStat_ResQueuePortalEntry;
+
+/* Callbacks for pgstat_kind_infos registration (used in pgstat.c). */
+extern bool pgstat_resqueue_flush_cb(PgStat_EntryRef *entry_ref, bool nowait);
+extern void pgstat_resqueue_reset_timestamp_cb(PgStatShared_Common *header,
+                                                                               
           TimestampTz ts);
+
 
 /*
  * Central shared memory entry for the cumulative stats system.
diff --git a/src/test/isolation2/expected/resource_queue_deadlock.out 
b/src/test/isolation2/expected/resource_queue_deadlock.out
index 06309f34b6d..3c14d72ba4d 100644
--- a/src/test/isolation2/expected/resource_queue_deadlock.out
+++ b/src/test/isolation2/expected/resource_queue_deadlock.out
@@ -5,6 +5,8 @@
 CREATE
 0: CREATE role role_deadlock_test RESOURCE QUEUE rq_deadlock_test;
 CREATE
+0: GRANT CREATE ON SCHEMA public TO role_deadlock_test;
+GRANT
 0: SET gp_autostats_lock_wait TO ON;
 SET
 
@@ -50,8 +52,8 @@ ANALYZE
 (1 row)
 2: SELECT * FROM t_deadlock_test;
 ERROR:  deadlock detected
-DETAIL:  Process 1618 waits for ExclusiveLock on resource queue 16520; blocked 
by process 1606.
-Process 1606 waits for ShareUpdateExclusiveLock on relation 16522 of database 
16478; blocked by process 1618.
+Process PID waits for ExclusiveLock on resource queue OID; blocked by process 
PID.
+Process PID waits for ShareUpdateExclusiveLock on relation OID of database 
OID; blocked by process PID.
 HINT:  See server log for query details.
 2: ROLLBACK;
 ROLLBACK
@@ -68,6 +70,8 @@ INSERT 1
 -- Clean up the test
 0: DROP TABLE t_deadlock_test;
 DROP
+0: REVOKE CREATE ON SCHEMA public FROM role_deadlock_test;
+REVOKE
 0: DROP ROLE role_deadlock_test;
 DROP
 0: DROP RESOURCE QUEUE rq_deadlock_test;
diff --git a/src/test/isolation2/expected/resource_queue_multi_portal.out 
b/src/test/isolation2/expected/resource_queue_multi_portal.out
index 5fe141ccc9c..8070edc230f 100644
--- a/src/test/isolation2/expected/resource_queue_multi_portal.out
+++ b/src/test/isolation2/expected/resource_queue_multi_portal.out
@@ -100,8 +100,8 @@ DECLARE
 -- After ending the transactions, there should be 0 active statements.
 1<:  <... completed>
 ERROR:  deadlock detected
-DETAIL:  Process 738539 waits for ExclusiveLock on resource queue 90366; 
blocked by process 738548.
-Process 738548 waits for ExclusiveLock on resource queue 90366; blocked by 
process 738539.
+Process PID waits for ExclusiveLock on resource queue OID; blocked by process 
PID.
+Process PID waits for ExclusiveLock on resource queue OID; blocked by process 
PID.
 HINT:  See server log for query details.
 1:END;
 END
@@ -135,7 +135,7 @@ DECLARE
 -- its transaction.
 1:DECLARE c3 CURSOR FOR SELECT 1;
 ERROR:  deadlock detected, locking against self
-DETAIL:  resource queue id: 585193, portal id: 3
+DETAIL:  resource queue id: OID, portal id: NUM
 
 -- There should be 0 active statements following the transaction abort.
 0:SELECT rsqcountlimit, rsqcountvalue FROM pg_resqueue_status WHERE rsqname = 
'rq_multi_portal';
diff --git a/src/test/isolation2/expected/resqueue_stats.out 
b/src/test/isolation2/expected/resqueue_stats.out
new file mode 100644
index 00000000000..9bf02f77060
--- /dev/null
+++ b/src/test/isolation2/expected/resqueue_stats.out
@@ -0,0 +1,92 @@
+-- Test pg_stat_resqueues cumulative statistics for resource queues.
+
+0:CREATE RESOURCE QUEUE rq_stats_test WITH (active_statements = 1);
+CREATE
+0:CREATE ROLE role_stats_test RESOURCE QUEUE rq_stats_test;
+CREATE
+
+-- Session 1 holds the queue slot so session 2 will block.
+1:SET role role_stats_test;
+SET
+1:BEGIN;
+BEGIN
+1:DECLARE c1 CURSOR FOR SELECT 1;
+DECLARE
+
+-- Session 2 submits a query that will block.
+2:SET role role_stats_test;
+SET
+2&:SELECT pg_sleep(0);  <waiting ...>
+
+-- Verify session 2 is waiting on the resource queue.
+0:SELECT wait_event_type, wait_event FROM pg_stat_activity WHERE query = 
'SELECT pg_sleep(0);';
+ wait_event_type | wait_event    
+-----------------+---------------
+ ResourceQueue   | ResourceQueue 
+(1 row)
+
+-- Cancel the blocked query (increments queries_rejected).
+0:SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query = 'SELECT 
pg_sleep(0);' AND wait_event = 'ResourceQueue';
+ pg_cancel_backend 
+-------------------
+ t                 
+(1 row)
+
+2<:  <... completed>
+ERROR:  canceling statement due to user request
+
+-- Release session 1's slot so later sessions can proceed.
+1:CLOSE c1;
+CLOSE
+1:END;
+END
+
+-- Session 3 runs a query that should be admitted and complete normally.
+3:SET role role_stats_test;
+SET
+3:SELECT 1;
+ ?column? 
+----------
+ 1        
+(1 row)
+
+-- Session 4 runs another query that completes normally.
+4:SET role role_stats_test;
+SET
+4:SELECT 2;
+ ?column? 
+----------
+ 2        
+(1 row)
+
+-- All resqueue stats are written directly to shared memory (no flush needed).
+-- Check that the view shows the expected minimum counts.
+-- queries_submitted >= 3: sessions 2 (rejected), 3, 4
+-- queries_admitted  >= 2: sessions 3 and 4
+-- queries_completed >= 2: sessions 3 and 4
+0:SELECT queuename, queries_submitted >= 3 AS submitted_ok, queries_admitted  
>= 2 AS admitted_ok, queries_completed >= 2 AS completed_ok FROM 
pg_stat_resqueues WHERE queuename = 'rq_stats_test';
+ queuename     | submitted_ok | admitted_ok | completed_ok 
+---------------+--------------+-------------+--------------
+ rq_stats_test | t            | t           | t            
+(1 row)
+
+-- All counter columns must be non-negative.
+0:SELECT queries_submitted >= 0 AS sub_nn, queries_admitted  >= 0 AS adm_nn, 
queries_rejected  >= 0 AS rej_nn, queries_completed >= 0 AS cmp_nn, 
total_wait_time_secs >= 0 AS wait_nn, max_wait_secs     >= 0 AS maxw_nn, 
total_exec_time_secs >= 0 AS exec_nn, max_exec_secs     >= 0 AS maxe_nn, 
total_cost        >= 0 AS cost_nn, total_memory_kb   >= 0 AS mem_nn FROM 
pg_stat_resqueues WHERE queuename = 'rq_stats_test';
+ sub_nn | adm_nn | rej_nn | cmp_nn | wait_nn | maxw_nn | exec_nn | maxe_nn | 
cost_nn | mem_nn 
+--------+--------+--------+--------+---------+---------+---------+---------+---------+--------
+ t      | t      | t      | t      | t       | t       | t       | t       | t 
      | t      
+(1 row)
+
+-- Verify pg_stat_get_resqueue_stats() returns data directly.
+-- The function has OUT parameters so no column definition list is needed.
+0:SELECT queries_submitted >= 0 AS ok FROM pg_stat_get_resqueue_stats( (SELECT 
oid FROM pg_resqueue WHERE rsqname = 'rq_stats_test') ) AS s;
+ ok   
+------
+ t    
+(1 row)
+
+-- Cleanup.
+0:DROP ROLE role_stats_test;
+DROP
+0:DROP RESOURCE QUEUE rq_stats_test;
+DROP
diff --git a/src/test/isolation2/init_file_isolation2 
b/src/test/isolation2/init_file_isolation2
index 1c01246e203..c359d3b8254 100644
--- a/src/test/isolation2/init_file_isolation2
+++ b/src/test/isolation2/init_file_isolation2
@@ -46,6 +46,10 @@ s/^DETAIL:  Process \d+ waits for ShareLock on transaction 
\d+; blocked by proce
 m/.*Process \d+ waits for ExclusiveLock on resource queue \d+; blocked by 
process \d+./
 s/.*Process \d+ waits for ExclusiveLock on resource queue \d+; blocked by 
process \d+./Process PID waits for ExclusiveLock on resource queue OID; blocked 
by process PID./
 
+# For resource queue self-deadlock detail
+m/resource queue id: \d+, portal id: \d+/
+s/resource queue id: \d+, portal id: \d+/resource queue id: OID, portal id: 
NUM/
+
 m/^Process \d+ waits for ShareLock on transaction \d+; blocked by process \d+./
 s/^Process \d+ waits for ShareLock on transaction \d+; blocked by process 
\d+./Process PID waits for ShareLock on transaction XID; blocked by process 
PID./
 
diff --git a/src/test/isolation2/sql/resource_queue_deadlock.sql 
b/src/test/isolation2/sql/resource_queue_deadlock.sql
index 6591e8c75b6..1b2ae432c06 100644
--- a/src/test/isolation2/sql/resource_queue_deadlock.sql
+++ b/src/test/isolation2/sql/resource_queue_deadlock.sql
@@ -3,6 +3,7 @@
 
 0: CREATE RESOURCE QUEUE rq_deadlock_test WITH (active_statements = 1);
 0: CREATE role role_deadlock_test RESOURCE QUEUE rq_deadlock_test;
+0: GRANT CREATE ON SCHEMA public TO role_deadlock_test;
 0: SET gp_autostats_lock_wait TO ON;
 
 0: SELECT gp_inject_fault_infinite('before_auto_stats', 'suspend', dbid) FROM 
gp_segment_configuration WHERE content = -1 AND role = 'p';
@@ -30,5 +31,6 @@
 
 -- Clean up the test
 0: DROP TABLE t_deadlock_test;
+0: REVOKE CREATE ON SCHEMA public FROM role_deadlock_test;
 0: DROP ROLE role_deadlock_test;
 0: DROP RESOURCE QUEUE rq_deadlock_test;
diff --git a/src/test/isolation2/sql/resqueue_stats.sql 
b/src/test/isolation2/sql/resqueue_stats.sql
new file mode 100644
index 00000000000..01e6041f839
--- /dev/null
+++ b/src/test/isolation2/sql/resqueue_stats.sql
@@ -0,0 +1,72 @@
+-- Test pg_stat_resqueues cumulative statistics for resource queues.
+
+0:CREATE RESOURCE QUEUE rq_stats_test WITH (active_statements = 1);
+0:CREATE ROLE role_stats_test RESOURCE QUEUE rq_stats_test;
+
+-- Session 1 holds the queue slot so session 2 will block.
+1:SET role role_stats_test;
+1:BEGIN;
+1:DECLARE c1 CURSOR FOR SELECT 1;
+
+-- Session 2 submits a query that will block.
+2:SET role role_stats_test;
+2&:SELECT pg_sleep(0);
+
+-- Verify session 2 is waiting on the resource queue.
+0:SELECT wait_event_type, wait_event FROM pg_stat_activity WHERE query = 
'SELECT pg_sleep(0);';
+
+-- Cancel the blocked query (increments queries_rejected).
+0:SELECT pg_cancel_backend(pid) FROM pg_stat_activity WHERE query = 'SELECT 
pg_sleep(0);' AND wait_event = 'ResourceQueue';
+
+2<:
+
+-- Release session 1's slot so later sessions can proceed.
+1:CLOSE c1;
+1:END;
+
+-- Session 3 runs a query that should be admitted and complete normally.
+3:SET role role_stats_test;
+3:SELECT 1;
+
+-- Session 4 runs another query that completes normally.
+4:SET role role_stats_test;
+4:SELECT 2;
+
+-- All resqueue stats are written directly to shared memory (no flush needed).
+-- Check that the view shows the expected minimum counts.
+-- queries_submitted >= 3: sessions 2 (rejected), 3, 4
+-- queries_admitted  >= 2: sessions 3 and 4
+-- queries_completed >= 2: sessions 3 and 4
+0:SELECT
+    queuename,
+    queries_submitted >= 3 AS submitted_ok,
+    queries_admitted  >= 2 AS admitted_ok,
+    queries_completed >= 2 AS completed_ok
+FROM pg_stat_resqueues
+WHERE queuename = 'rq_stats_test';
+
+-- All counter columns must be non-negative.
+0:SELECT
+    queries_submitted >= 0 AS sub_nn,
+    queries_admitted  >= 0 AS adm_nn,
+    queries_rejected  >= 0 AS rej_nn,
+    queries_completed >= 0 AS cmp_nn,
+    total_wait_time_secs >= 0 AS wait_nn,
+    max_wait_secs     >= 0 AS maxw_nn,
+    total_exec_time_secs >= 0 AS exec_nn,
+    max_exec_secs     >= 0 AS maxe_nn,
+    total_cost        >= 0 AS cost_nn,
+    total_memory_kb   >= 0 AS mem_nn
+FROM pg_stat_resqueues
+WHERE queuename = 'rq_stats_test';
+
+-- Verify pg_stat_get_resqueue_stats() returns data directly.
+-- The function has OUT parameters so no column definition list is needed.
+0:SELECT queries_submitted >= 0 AS ok
+FROM pg_stat_get_resqueue_stats(
+        (SELECT oid FROM pg_resqueue WHERE rsqname = 'rq_stats_test')
+     ) AS s;
+
+-- Cleanup.
+0:DROP ROLE role_stats_test;
+0:DROP RESOURCE QUEUE rq_stats_test;


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to