From: Vineeth Pillai <[email protected]>

Replace trace_foo() with the new trace_call__foo() at sites already
guarded by trace_foo_enabled(), avoiding a redundant
static_branch_unlikely() re-evaluation inside the tracepoint.
trace_call__foo() invokes the tracepoint callbacks directly without
re-evaluating the static branch.

Original v2 series:
https://lore.kernel.org/linux-trace-kernel/[email protected]/

Parts of the original v2 series have already been merged into mainline.
This patch is being reposted as a follow-up cleanup for the remaining
unmerged pieces.

Suggested-by: Steven Rostedt <[email protected]>
Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Vineeth Pillai (Google) <[email protected]>
Assisted-by: Claude:claude-sonnet-4-6
---
 kernel/time/tick-sched.c       | 12 ++++++------
 kernel/trace/trace_benchmark.c |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cbbb87a0c6e7..3b42ee75f48c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -348,32 +348,32 @@ static bool check_tick_dependency(atomic_t *dep)
                return !!val;
 
        if (val & TICK_DEP_MASK_POSIX_TIMER) {
-               trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
+               trace_call__tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
                return true;
        }
 
        if (val & TICK_DEP_MASK_PERF_EVENTS) {
-               trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
+               trace_call__tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
                return true;
        }
 
        if (val & TICK_DEP_MASK_SCHED) {
-               trace_tick_stop(0, TICK_DEP_MASK_SCHED);
+               trace_call__tick_stop(0, TICK_DEP_MASK_SCHED);
                return true;
        }
 
        if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
-               trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+               trace_call__tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
                return true;
        }
 
        if (val & TICK_DEP_MASK_RCU) {
-               trace_tick_stop(0, TICK_DEP_MASK_RCU);
+               trace_call__tick_stop(0, TICK_DEP_MASK_RCU);
                return true;
        }
 
        if (val & TICK_DEP_MASK_RCU_EXP) {
-               trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
+               trace_call__tick_stop(0, TICK_DEP_MASK_RCU_EXP);
                return true;
        }
 
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index e19c32f2a938..189d383934fd 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -51,7 +51,7 @@ static void trace_do_benchmark(void)
 
        local_irq_disable();
        start = trace_clock_local();
-       trace_benchmark_event(bm_str, bm_last);
+       trace_call__benchmark_event(bm_str, bm_last);
        stop = trace_clock_local();
        local_irq_enable();
 
-- 
2.54.0


Reply via email to