Debugging lockdep data structure inconsistencies is challenging. Add
code that verifies data structure consistency at runtime. That code is
disabled by default because it is very CPU intensive.

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 kernel/locking/lockdep.c | 170 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 170 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a8ea03bfc944..acf61dbb8b30 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -74,6 +74,8 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif
 
+static bool check_data_structure_consistency;
+
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  *               class/list/hash allocators.
@@ -760,6 +762,81 @@ static bool assign_lock_key(struct lockdep_map *lock)
        return true;
 }
 
+/*
+ * Test list membership: return true if @e is one of the elements linked
+ * into list @h and false otherwise.
+ */
+static bool in_list(struct list_head *e, struct list_head *h)
+{
+       struct list_head *pos;
+
+       for (pos = h->next; pos != h; pos = pos->next)
+               if (pos == e)
+                       return true;
+
+       return false;
+}
+
+/*
+ * Return true if list entry @e is linked into the locks_before or
+ * locks_after list of at least one lock class.
+ */
+static bool in_any_class_list(struct list_head *e)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+               struct lock_class *class = &lock_classes[i];
+
+               if (in_list(e, &class->locks_before) ||
+                   in_list(e, &class->locks_after))
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * Check that every lock_list entry on list @h links back to class @c
+ * through its links_to member. Reports the first inconsistent entry via
+ * printk and returns false, or returns true if the list is consistent.
+ */
+static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
+{
+       struct lock_list *e;
+
+       list_for_each_entry(e, h, entry) {
+               if (e->links_to != c) {
+                       /* The entry index into list_entries[] identifies it. */
+                       printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s\n",
+                              c->name ? : "(?)",
+                              (unsigned long)(e - list_entries),
+                              e->links_to && e->links_to->name ?
+                              e->links_to->name : "(?)",
+                              e->class && e->class->name ? e->class->name :
+                              "(?)");
+                       return false;
+               }
+       }
+       return true;
+}
+
+static u16 chain_hlocks[];
+
+/*
+ * Recompute the chain key of @chain from its recorded held-lock class
+ * indices and verify that it matches the stored chain_key. Reports a
+ * mismatch via printk. Always returns true if CONFIG_PROVE_LOCKING is
+ * not set since chain keys are not maintained in that configuration.
+ */
+static bool check_lock_chain_key(struct lock_chain *chain)
+{
+#ifdef CONFIG_PROVE_LOCKING
+       u64 key = 0;
+       int i;
+
+       for (i = 0; i < chain->depth; i++)
+               key = iterate_chain_key(key, chain_hlocks[chain->base + i] + 1);
+       if (chain->chain_key != key) {
+               /*
+                * The 'unsigned long long' casts avoid that a compiler warning
+                * is reported when building tools/lib/lockdep.
+                */
+               printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
+                      (unsigned long long)(chain - lock_chains),
+                      (unsigned long long)chain->chain_key,
+                      (unsigned long long)key);
+               return false;
+       }
+       return true;
+#else
+       return true;
+#endif
+}
+
 static bool list_entry_being_freed(int list_entry_idx)
 {
        struct pending_free *pf;
@@ -773,6 +850,97 @@ static bool list_entry_being_freed(int list_entry_idx)
        return false;
 }
 
+/*
+ * Return true if @class is linked onto the zapped_classes list of any
+ * pending_free[] element.
+ */
+static bool in_any_zapped_class_list(struct lock_class *class)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(pending_free); i++) {
+               struct pending_free *pf = &pending_free[i];
+
+               if (in_list(&class->lock_entry, &pf->zapped_classes))
+                       return true;
+       }
+
+       return false;
+}
+
+/*
+ * Verify the consistency of the lockdep data structures: class list
+ * membership, lock list back-links, chain keys and list entry usage.
+ * Returns true if all checks pass and false as soon as the first
+ * inconsistency is found; that inconsistency is reported via printk.
+ * Walks global lockdep state, so the caller must hold the graph lock.
+ */
+static bool check_data_structures(void)
+{
+       struct lock_class *class;
+       struct lock_chain *chain;
+       struct hlist_head *head;
+       struct lock_list *e;
+       int i;
+
+       /* Check whether all classes occur in a lock list. */
+       for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+               class = &lock_classes[i];
+               if (!in_list(&class->lock_entry, &all_lock_classes) &&
+                   !in_list(&class->lock_entry, &free_lock_classes) &&
+                   !in_any_zapped_class_list(class)) {
+                       printk(KERN_INFO "class %px/%s is not in any class list\n",
+                              class, class->name ? : "(?)");
+                       return false;
+               }
+       }
+
+       /* Check whether all classes have valid lock lists. */
+       for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+               class = &lock_classes[i];
+               if (!class_lock_list_valid(class, &class->locks_before))
+                       return false;
+               if (!class_lock_list_valid(class, &class->locks_after))
+                       return false;
+       }
+
+       /* Check the chain_key of all lock chains. */
+       for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
+               head = chainhash_table + i;
+               hlist_for_each_entry_rcu(chain, head, entry) {
+                       if (!check_lock_chain_key(chain))
+                               return false;
+               }
+       }
+
+       /*
+        * Check whether all list entries that are in use occur in a class
+        * lock list.
+        */
+       for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+               /* Entries queued for freeing may already be unlinked. */
+               if (list_entry_being_freed(i))
+                       continue;
+               e = list_entries + i;
+               if (!in_any_class_list(&e->entry)) {
+                       printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
+                              (unsigned int)(e - list_entries),
+                              e->class->name ? : "(?)",
+                              e->links_to->name ? : "(?)");
+                       return false;
+               }
+       }
+
+       /*
+        * Check whether all list entries that are not in use do not occur in
+        * a class lock list.
+        */
+       for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+               if (WARN_ON_ONCE(list_entry_being_freed(i)))
+                       return false;
+               e = list_entries + i;
+               if (in_any_class_list(&e->entry)) {
+                       printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
+                              (unsigned int)(e - list_entries),
+                              e->class && e->class->name ? e->class->name :
+                              "(?)",
+                              e->links_to && e->links_to->name ?
+                              e->links_to->name : "(?)");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 /*
  * Initialize the lock_classes[] array elements, the free_lock_classes list
  * and also the pending_free[] array.
@@ -4376,6 +4544,8 @@ static void free_zapped_classes(struct rcu_head *ch)
        if (!graph_lock())
                goto restore_irqs;
        pf->scheduled = false;
+       if (check_data_structure_consistency)
+               WARN_ON_ONCE(!check_data_structures());
        list_for_each_entry(class, &pf->zapped_classes, lock_entry) {
                reinit_class(class);
        }
-- 
2.20.1.97.g81188d93c3-goog

Reply via email to