From: Vineeth Pillai <[email protected]>

Replace trace_foo() with the new trace_call__foo() at sites already
guarded by trace_foo_enabled(), avoiding a redundant
static_branch_unlikely() re-evaluation inside the tracepoint.
trace_call__foo() calls the tracepoint callbacks directly without
utilizing the static branch again.

Original v2 series:
https://lore.kernel.org/linux-trace-kernel/[email protected]/

Parts of the original v2 series have already been merged in mainline.
This patch is being reposted as a follow-up cleanup for the remaining
unmerged pieces.

Suggested-by: Steven Rostedt <[email protected]>
Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Vineeth Pillai (Google) <[email protected]>
Assisted-by: Claude:claude-sonnet-4-6
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c            |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c            |  4 ++--
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 +++++-----
 drivers/gpu/drm/scheduler/sched_entity.c          |  5 +++--
 4 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b24d5d21be5f..cb0b5cb07d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1004,7 +1004,7 @@ static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
                struct amdgpu_job *job = p->jobs[i];
 
                for (j = 0; j < job->num_ibs; ++j)
-                       trace_amdgpu_cs(p, job, &job->ibs[j]);
+                       trace_call__amdgpu_cs(p, job, &job->ibs[j]);
        }
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9ba9de16a27a..a36ae94c425f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1415,7 +1415,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 
        if (trace_amdgpu_vm_bo_mapping_enabled()) {
                list_for_each_entry(mapping, &bo_va->valids, list)
-                       trace_amdgpu_vm_bo_mapping(mapping);
+                       trace_call__amdgpu_vm_bo_mapping(mapping);
        }
 
 error_free:
@@ -2183,7 +2183,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
                                continue;
                }
 
-               trace_amdgpu_vm_bo_cs(mapping);
+               trace_call__amdgpu_vm_bo_cs(mapping);
        }
 }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5fc5d5608506..fbdc12cdd6bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5263,11 +5263,11 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
        }
 
        if (trace_amdgpu_dm_brightness_enabled()) {
-               trace_amdgpu_dm_brightness(__builtin_return_address(0),
-                                          user_brightness,
-                                          brightness,
-                                          caps->aux_support,
-                                          power_supply_is_system_supplied() > 0);
+               trace_call__amdgpu_dm_brightness(__builtin_return_address(0),
+                                                user_brightness,
+                                                brightness,
+                                                caps->aux_support,
+                                                power_supply_is_system_supplied() > 0);
        }
 
        if (caps->aux_support) {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index fe174a4857be..185a2636b599 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -429,7 +429,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
 
        if (trace_drm_sched_job_unschedulable_enabled() &&
            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
-               trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+               trace_call__drm_sched_job_unschedulable(sched_job,
+                                                       entity->dependency);
 
        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
@@ -586,7 +587,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
                unsigned long index;
 
                xa_for_each(&sched_job->dependencies, index, entry)
-                       trace_drm_sched_job_add_dep(sched_job, entry);
+                       trace_call__drm_sched_job_add_dep(sched_job, entry);
        }
        atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);
-- 
2.54.0


Reply via email to