The wrapper functions __trace_set_current_state() and __trace_set_need_resched() allow the tracepoints to be called from code outside sched/core.c; those calls are already guarded by tracepoint_enabled(<tp>), so there is no need to repeat the check inside the wrapper by using trace_<tp>().
Use the new trace_call__<tp>() API to call the tracepoint directly, without the enabled check. These helper functions must only be called after the appropriate check. Signed-off-by: Gabriele Monaco <[email protected]> --- kernel/sched/core.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index da20fb6ea25a..c37562b02e24 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -537,10 +537,14 @@ sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { } /* need a wrapper since we may need to trace from modules */ EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp); -/* Call via the helper macro trace_set_current_state. */ +/* + * Call via the helper macro trace_set_current_state. + * Calls to this function MUST be guarded by a + * tracepoint_enabled(sched_set_state_tp) + */ void __trace_set_current_state(int state_value) { - trace_sched_set_state_tp(current, state_value); + trace_call__sched_set_state_tp(current, state_value); } EXPORT_SYMBOL(__trace_set_current_state); @@ -1203,9 +1207,13 @@ static void __resched_curr(struct rq *rq, int tif) } } +/* + * Calls to this function MUST be guarded by a + * tracepoint_enabled(sched_set_need_resched_tp) + */ void __trace_set_need_resched(struct task_struct *curr, int tif) { - trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif); + trace_call__sched_set_need_resched_tp(curr, smp_processor_id(), tif); } EXPORT_SYMBOL_GPL(__trace_set_need_resched); base-commit: 254f49634ee16a731174d2ae34bc50bd5f45e731 -- 2.54.0
