This is an automated email from the ASF dual-hosted git repository.

djwang pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry.git

commit c998c0942defb2a53f9a7af8638e0aabf350ffd3
Author: Maxim Smyatkin <[email protected]>
AuthorDate: Tue Dec 26 16:36:26 2023 +0300

    [yagp_hooks_collector] Improve query_id and resource group resolution
    
    Use core query_id from Query instead of a separate hash.  Resolve
    resource group from the current session rather than the role default.
---
 src/EventSender.cpp | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/src/EventSender.cpp b/src/EventSender.cpp
index e3be58b194e..21c2e2117a3 100644
--- a/src/EventSender.cpp
+++ b/src/EventSender.cpp
@@ -54,10 +54,7 @@ std::string *get_db_name() {
 }
 
 std::string *get_rg_name() {
-  auto userId = GetUserId();
-  if (!OidIsValid(userId))
-    return nullptr;
-  auto groupId = GetResGroupIdForRole(userId);
+  auto groupId = ResGroupGetGroupIdBySessionId(MySessionState->sessionId);
   if (!OidIsValid(groupId))
     return nullptr;
   char *rgname = GetResGroupNameForId(groupId);
@@ -119,13 +116,7 @@ void set_query_plan(yagpcc::SetQueryReq *req, QueryDesc *query_desc) {
     StringInfo norm_plan = gen_normplan(qi->plan_text().c_str());
     *qi->mutable_template_plan_text() = std::string(norm_plan->data);
     qi->set_plan_id(hash_any((unsigned char *)norm_plan->data, norm_plan->len));
-    // TODO: For now assume queryid equal to planid, which is wrong. The
-    // reason for doing so this bug
-    // https://github.com/greenplum-db/gpdb/pull/15385 (ORCA loses
-    // pg_stat_statements` queryid during planning phase). Need to fix it
-    // upstream, cherry-pick and bump gp
-    // qi->set_query_id(query_desc->plannedstmt->queryId);
-    qi->set_query_id(qi->plan_id());
+    qi->set_query_id(query_desc->plannedstmt->queryId);
   }
 }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to