Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Waiman Long <long...@redhat.com>
Cc: Johannes Berg <johan...@sipsolutions.net>
Signed-off-by: Bart Van Assche <bvanass...@acm.org>
---
 kernel/locking/lockdep.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index acf61dbb8b30..72cff86829e6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -743,6 +743,17 @@ static bool assign_lock_key(struct lockdep_map *lock)
 {
        unsigned long can_addr, addr = (unsigned long)lock;
 
+#ifdef __KERNEL__
+       /*
+        * lockdep_free_key_range() assumes that struct lock_class_key
+        * objects do not overlap. Since the address of a static lock
+        * object is used as its class key, check that the size of
+        * struct lock_class_key does not exceed the size of the
+        * smallest lock object.
+        */
+       BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
+#endif
+
        if (__is_kernel_percpu_address(addr, &can_addr))
                lock->key = (void *)can_addr;
        else if (__is_module_percpu_address(addr, &can_addr))
-- 
2.20.1.97.g81188d93c3-goog
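
For context (not part of the patch): a minimal standalone sketch of the
compile-time check the patch relies on. The BUILD_BUG_ON() stand-in via
C11 _Static_assert and the struct sizes below are illustrative
assumptions, not the kernel's actual definitions:

/* Stand-in for the kernel's BUILD_BUG_ON(): compilation fails
 * when the condition is true (C11 _Static_assert). */
#define BUILD_BUG_ON(cond) _Static_assert(!(cond), #cond)

/* Hypothetical type sizes, chosen only for illustration. */
struct lock_class_key { void *subkey; };                    /* 8 bytes */
typedef struct { unsigned long raw_lock; } raw_spinlock_t;  /* 8 bytes */

int main(void)
{
        /* Same invariant the patch enforces: a class key derived from a
         * static lock's address must fit inside the smallest lock
         * object, so the keys of adjacent static locks cannot overlap
         * when lockdep_free_key_range() scans an address range. */
        BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
        return 0;
}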
