This is an automated email from the ASF dual-hosted git repository.

yjhjstz pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry.git

commit 667481c65faff0890cdfab6fc1d14d6d23ddf88c
Author: Zhenghua Lyu <[email protected]>
AuthorDate: Tue Apr 23 13:39:04 2024 +0800

    Add some LOGs for GDD backends.
    
    Github 17369 reports that the GDD backend uses a huge amount of memory
    and triggers the oom-killer; so far we do not have an RCA. This commit
    adds more LOGs so that, together with the system memory monitoring LOGs,
    we can have detailed event timestamps and narrow down the issue if it
    happens again.
---
 src/backend/utils/gdd/gddbackend.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/backend/utils/gdd/gddbackend.c 
b/src/backend/utils/gdd/gddbackend.c
index 49a6e52a05..1c0d5105ba 100644
--- a/src/backend/utils/gdd/gddbackend.c
+++ b/src/backend/utils/gdd/gddbackend.c
@@ -202,6 +202,8 @@ doDeadLockCheck(void)
 
        oldContext = MemoryContextSwitchTo(gddContext);
 
+       elog(LOG, "start a new round of global deadlock check");
+
        PG_TRY();
        {
                ctx = GddCtxNew();
@@ -241,6 +243,8 @@ doDeadLockCheck(void)
        MemoryContextSwitchTo(oldContext);
        MemoryContextReset(gddContext);
 
+       elog(LOG, "finish the round of global deadlock check.");
+
        return ret_status;
 }
 
@@ -283,6 +287,9 @@ buildWaitGraph(GddCtx *ctx)
                        tupdesc = SPI_tuptable->tupdesc;
                        tuptable = SPI_tuptable;
 
+                       elogif(tuple_num > 0, LOG,
+                                "GDD get %d wait relationship from all 
segments.", tuple_num);
+
                        /*
                         * Switch back to gdd memory context otherwise the 
graphs will be
                         * created in SPI memory context and freed in 
SPI_finish().


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to