From: Peter Zijlstra <pet...@infradead.org>

[ Upstream commit baffd723e44dc3d7f84f0b8f1fe1ece00ddd2710 ]

The thinking in commit:

  fddf9055a60d ("lockdep: Use raw_cpu_*() for per-cpu variables")

is flawed. While it is true that both CPUs hold a 0 value while we are
migratable, it does not follow that, when we do get migrated in the
middle of a raw_cpu_op(), the old CPU's variable will still be 0 by the
time we (now running on the new CPU) actually get around to reading it.
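
To make that window concrete, here is a minimal user-space model of the
race. The names fake_raw_cpu_read(), cpu_slot[] and current_cpu are
invented for this sketch and do not exist in the kernel; with the
generic per-cpu fallbacks, raw_cpu_read() similarly computes the per-cpu
address and then loads from it, with nothing preventing a migration
between the two steps:

	/* Illustration only: a user-space model, not kernel code. */
	#include <stdio.h>

	static int cpu_slot[2];		/* per-CPU "lockdep_recursion", one slot per CPU */
	static int current_cpu;		/* CPU the task is currently running on */

	static int fake_raw_cpu_read(void)
	{
		int *addr = &cpu_slot[current_cpu];	/* step 1: compute the address (CPU0) */

		current_cpu = 1;			/* we get migrated to CPU1 here */
		cpu_slot[0] = 1;			/* another task on CPU0 is now inside lockdep */

		return *addr;				/* step 2: load; sees CPU0's 1, not CPU1's 0 */
	}

	int main(void)
	{
		printf("lockdep_recursion seen: %d, expected 0\n",
		       fake_raw_cpu_read());
		return 0;
	}

A stale non-zero read like that makes __lockdep_enabled evaluate to
false and silently suppresses the assertions below, hence the switch
back to this_cpu_read().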

Luckily, the reason for that commit (s390 using preempt_disable()
instead of preempt_disable_notrace() in its percpu code) has since
been fixed by commit:

  1196f12a2c96 ("s390: don't trace preemption in percpu macros")

An audit of arch/*/include/asm/percpu*.h shows there are no other
architectures affected by this particular issue.

Fixes: fddf9055a60d ("lockdep: Use raw_cpu_*() for per-cpu variables")
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Link: https://lkml.kernel.org/r/20201005095958.gj2...@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 include/linux/lockdep.h | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b1227be47496c..1130f271de669 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -512,19 +512,19 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #define lock_map_release(l)                    lock_release(l, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock)                                              \
+# define might_lock(lock)                                              \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
 } while (0)
-# define might_lock_read(lock)                                                 \
+# define might_lock_read(lock)                                         \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, _THIS_IP_);                      \
 } while (0)
-# define might_lock_nested(lock, subclass)                             \
+# define might_lock_nested(lock, subclass)                             \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,         \
@@ -536,29 +536,21 @@ DECLARE_PER_CPU(int, hardirqs_enabled);
 DECLARE_PER_CPU(int, hardirq_context);
 DECLARE_PER_CPU(unsigned int, lockdep_recursion);
 
-/*
- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
- * per-cpu variables. This is required because this_cpu_read() will potentially
- * call into preempt/irq-disable and that obviously isn't right. This is also
- * correct because when IRQs are enabled, it doesn't matter if we accidentally
- * read the value from our previous CPU.
- */
-
-#define __lockdep_enabled      (debug_locks && !raw_cpu_read(lockdep_recursion))
+#define __lockdep_enabled      (debug_locks && !this_cpu_read(lockdep_recursion))
 
 #define lockdep_assert_irqs_enabled()                                  \
 do {                                                                   \
-       WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirqs_enabled)); \
+       WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_irqs_disabled()                                 \
 do {                                                                   \
-       WARN_ON_ONCE(__lockdep_enabled && raw_cpu_read(hardirqs_enabled)); \
+       WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
 } while (0)
 
 #define lockdep_assert_in_irq()                                                \
 do {                                                                   \
-       WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirq_context)); \
+       WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
 } while (0)
 
 #define lockdep_assert_preemption_enabled()                            \
@@ -566,7 +558,7 @@ do {                                                         \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() != 0              ||              \
-                     !raw_cpu_read(hardirqs_enabled)));                \
+                     !this_cpu_read(hardirqs_enabled)));               \
 } while (0)
 
 #define lockdep_assert_preemption_disabled()                           \
@@ -574,7 +566,7 @@ do {                                                         \
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)   &&              \
                     __lockdep_enabled                  &&              \
                     (preempt_count() == 0              &&              \
-                     raw_cpu_read(hardirqs_enabled)));                 \
+                     this_cpu_read(hardirqs_enabled)));                \
 } while (0)
 
 #else
-- 
2.25.1


