The preempt_offset argument is always 0 at all the call sites of __cant_sleep(), hence remove it. This also allows us to clean up the code a bit by no longer using a "preempt_count() > .." comparison.
Signed-off-by: Boqun Feng <[email protected]> --- include/linux/kernel.h | 4 ++-- kernel/sched/core.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e5570a16cbb1..24414c79e59a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -72,7 +72,7 @@ extern int dynamic_might_resched(void); #ifdef CONFIG_DEBUG_ATOMIC_SLEEP extern void __might_resched(const char *file, int line, unsigned int offsets); extern void __might_sleep(const char *file, int line); -extern void __cant_sleep(const char *file, int line, int preempt_offset); +extern void __cant_sleep(const char *file, int line); extern void __cant_migrate(const char *file, int line); /** @@ -95,7 +95,7 @@ extern void __cant_migrate(const char *file, int line); * this macro will print a stack trace if it is executed with preemption enabled */ # define cant_sleep() \ - do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) + do { __cant_sleep(__FILE__, __LINE__); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) /** diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b8871449d3c6..75dba7cc09bd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9165,7 +9165,7 @@ void __might_resched(const char *file, int line, unsigned int offsets) } EXPORT_SYMBOL(__might_resched); -void __cant_sleep(const char *file, int line, int preempt_offset) +void __cant_sleep(const char *file, int line) { static unsigned long prev_jiffy; @@ -9175,7 +9175,7 @@ void __cant_sleep(const char *file, int line, int preempt_offset) if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) return; - if (preempt_count() > preempt_offset) + if (preempt_count()) return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) -- 2.50.1 (Apple Git-155)

