From: Thomas Gleixner <t...@linutronix.de> CONFIG_PREEMPT_COUNT is now unconditionally enabled and will be removed. Clean up the leftovers before doing so.
Signed-off-by: Thomas Gleixner <t...@linutronix.de> Cc: Peter Zijlstra <pet...@infradead.org> Cc: Ingo Molnar <mi...@kernel.org> Cc: Will Deacon <w...@kernel.org> Acked-by: Will Deacon <w...@kernel.org> [ Rezki: Adopted for 5.10.0-rc1 kernel. ] Signed-off-by: Uladzislau Rezki (Sony) <ure...@gmail.com> --- include/linux/lockdep.h | 6 ++---- lib/Kconfig.debug | 1 - 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index f5594879175a..d05db575f60f 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -580,16 +580,14 @@ do { \ #define lockdep_assert_preemption_enabled() \ do { \ - WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ - __lockdep_enabled && \ + WARN_ON_ONCE(__lockdep_enabled && \ (preempt_count() != 0 || \ !this_cpu_read(hardirqs_enabled))); \ } while (0) #define lockdep_assert_preemption_disabled() \ do { \ - WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ - __lockdep_enabled && \ + WARN_ON_ONCE(__lockdep_enabled && \ (preempt_count() == 0 && \ this_cpu_read(hardirqs_enabled))); \ } while (0) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 89c9a177fb9b..03a85065805e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1159,7 +1159,6 @@ config PROVE_LOCKING select DEBUG_RWSEMS select DEBUG_WW_MUTEX_SLOWPATH select DEBUG_LOCK_ALLOC - select PREEMPT_COUNT select TRACE_IRQFLAGS default n help -- 2.20.1