Commit-ID:  956f3563a8387beb7758f2e8ee483639ef91afc6
Gitweb:     https://git.kernel.org/tip/956f3563a8387beb7758f2e8ee483639ef91afc6
Author:     Bart Van Assche <bvanass...@acm.org>
AuthorDate: Thu, 14 Feb 2019 15:00:43 -0800
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Thu, 28 Feb 2019 07:55:42 +0100

locking/lockdep: Split lockdep_free_key_range() and lockdep_reset_lock()

This patch does not change the behavior of these functions but makes the
patch that frees unused lock classes easier to read.

Signed-off-by: Bart Van Assche <bvanass...@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Johannes Berg <johan...@sipsolutions.net>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Waiman Long <long...@redhat.com>
Cc: Will Deacon <will.dea...@arm.com>
Cc: johannes.b...@intel.com
Cc: t...@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-9-bvanass...@acm.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/locking/lockdep.c | 72 ++++++++++++++++++++++++------------------------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d1a6daf1f51f..2d4c21a02546 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4160,6 +4160,24 @@ static inline int within(const void *addr, void *start, unsigned long size)
        return addr >= start && addr < start + size;
 }
 
+static void __lockdep_free_key_range(void *start, unsigned long size)
+{
+       struct lock_class *class;
+       struct hlist_head *head;
+       int i;
+
+       /* Unhash all classes that were created by a module. */
+       for (i = 0; i < CLASSHASH_SIZE; i++) {
+               head = classhash_table + i;
+               hlist_for_each_entry_rcu(class, head, hash_entry) {
+                       if (!within(class->key, start, size) &&
+                           !within(class->name, start, size))
+                               continue;
+                       zap_class(class);
+               }
+       }
+}
+
 /*
  * Used in module.c to remove lock classes from memory that is going to be
  * freed; and possibly re-used by other modules.
@@ -4170,30 +4188,14 @@ static inline int within(const void *addr, void *start, unsigned long size)
  */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-       struct lock_class *class;
-       struct hlist_head *head;
        unsigned long flags;
-       int i;
        int locked;
 
        init_data_structures_once();
 
        raw_local_irq_save(flags);
        locked = graph_lock();
-
-       /*
-        * Unhash all classes that were created by this module:
-        */
-       for (i = 0; i < CLASSHASH_SIZE; i++) {
-               head = classhash_table + i;
-               hlist_for_each_entry_rcu(class, head, hash_entry) {
-                       if (within(class->key, start, size))
-                               zap_class(class);
-                       else if (within(class->name, start, size))
-                               zap_class(class);
-               }
-       }
-
+       __lockdep_free_key_range(start, size);
        if (locked)
                graph_unlock();
        raw_local_irq_restore(flags);
@@ -4235,16 +4237,11 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock)
        return false;
 }
 
-void lockdep_reset_lock(struct lockdep_map *lock)
+/* The caller must hold the graph lock. Does not sleep. */
+static void __lockdep_reset_lock(struct lockdep_map *lock)
 {
        struct lock_class *class;
-       unsigned long flags;
-       int j, locked;
-
-       init_data_structures_once();
-
-       raw_local_irq_save(flags);
-       locked = graph_lock();
+       int j;
 
        /*
         * Remove all classes this lock might have:
@@ -4261,19 +4258,22 @@ void lockdep_reset_lock(struct lockdep_map *lock)
         * Debug check: in the end all mapped classes should
         * be gone.
         */
-       if (unlikely(lock_class_cache_is_registered(lock))) {
-               if (debug_locks_off_graph_unlock()) {
-                       /*
-                        * We all just reset everything, how did it match?
-                        */
-                       WARN_ON(1);
-               }
-               goto out_restore;
-       }
+       if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
+               debug_locks_off();
+}
+
+void lockdep_reset_lock(struct lockdep_map *lock)
+{
+       unsigned long flags;
+       int locked;
+
+       init_data_structures_once();
+
+       raw_local_irq_save(flags);
+       locked = graph_lock();
+       __lockdep_reset_lock(lock);
        if (locked)
                graph_unlock();
-
-out_restore:
        raw_local_irq_restore(flags);
 }
 

Reply via email to