With recursive-read locks, a cycle is not a sufficient condition for a
deadlock. As a result, we need to be able to continue the search after a
match when that match is not the one we want. __bfs() is adjusted to
that end: the basic idea is to enqueue a node's children before matching
the node itself.

No functional change.

Signed-off-by: Yuyang Du <duyuy...@gmail.com>
---
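For reviewers, here is a minimal standalone sketch of the idea (this is
not the kernel code: the toy struct node, the fixed-size queue, the
helper names, and returning the match directly instead of through a
*target_entry/ret pair are all simplifications made up for
illustration). It shows a BFS that enqueues a node's children before
calling match() on the node, so a caller that rejects a match can
resume the search by calling again with init == false:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_NODES 16

struct node {
	int id;
	struct node *children[4];
	int nr_children;
	bool accessed;
};

/*
 * Fixed-size FIFO standing in for lockdep's struct circular_queue.
 * It does not wrap; MAX_NODES bounds the total enqueues in this demo.
 */
struct queue {
	struct node *elem[MAX_NODES];
	int head, tail;
};

static void q_init(struct queue *q) { q->head = q->tail = 0; }
static bool q_empty(struct queue *q) { return q->head == q->tail; }
static void q_enqueue(struct queue *q, struct node *n) { q->elem[q->tail++] = n; }
static struct node *q_dequeue(struct queue *q) { return q->elem[q->head++]; }

/*
 * BFS that enqueues a node's children *before* calling match() on the
 * node itself.  Because the children are already queued when a match
 * is returned, a caller that decides the match is not wanted can
 * resume the search by calling again with init == false.
 */
static struct node *bfs(struct queue *q, struct node *src,
			bool (*match)(struct node *), bool init)
{
	struct node *n;
	int i;

	if (init) {
		q_init(q);
		src->accessed = true;
		q_enqueue(q, src);
	}

	while (!q_empty(q)) {
		n = q_dequeue(q);

		/* Children first, so a rejected match can continue. */
		for (i = 0; i < n->nr_children; i++) {
			struct node *c = n->children[i];

			if (!c->accessed) {
				c->accessed = true;
				q_enqueue(q, c);
			}
		}

		if (match(n))
			return n;
	}
	return NULL;
}

static bool match_even(struct node *n) { return n->id % 2 == 0; }

int main(void)
{
	struct node a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };
	struct queue q;
	struct node *hit;

	a.children[0] = &b;
	a.children[1] = &c;
	a.nr_children = 2;

	hit = bfs(&q, &a, match_even, true);	/* finds a (id 0) */
	printf("first match: %d\n", hit ? hit->id : -1);

	/* Suppose the caller rejects node 0: resume without re-init. */
	hit = bfs(&q, &a, match_even, false);	/* finds c (id 2) */
	printf("next match: %d\n", hit ? hit->id : -1);
	return 0;
}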
 kernel/locking/lockdep.c | 51 ++++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 10df8eb..7d02b94 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1551,7 +1551,7 @@ static inline void set_lock_type2(struct lock_list *lock, int read)
 static int __bfs(struct lock_list *source_entry,
                 void *data,
                 int (*match)(struct lock_list *entry, void *data),
-                struct lock_list **target_entry, int forward)
+                struct lock_list **target_entry, int forward, bool init)
 {
        struct lock_list *entry;
        struct lock_list *lock;
@@ -1559,19 +1559,11 @@ static int __bfs(struct lock_list *source_entry,
        struct circular_queue *cq = &lock_cq;
        int ret = 1;
 
-       if (match(source_entry, data)) {
-               *target_entry = source_entry;
-               ret = 0;
-               goto exit;
+       if (init) {
+               __cq_init(cq);
+               __cq_enqueue(cq, source_entry);
        }
 
-       head = get_dep_list(source_entry, forward);
-       if (list_empty(head))
-               goto exit;
-
-       __cq_init(cq);
-       __cq_enqueue(cq, source_entry);
-
        while ((lock = __cq_dequeue(cq))) {
 
                if (!lock->class[forward]) {
@@ -1583,25 +1575,34 @@ static int __bfs(struct lock_list *source_entry,
 
                DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
+               /*
+                * Enqueue a node's children before match() so that even
+                * if this node matches but is not wanted, we can
+                * continue the search.
+                */
                list_for_each_entry_rcu(entry, head, entry[forward]) {
                        if (!lock_accessed(entry, forward)) {
                                unsigned int cq_depth;
+
                                mark_lock_accessed(entry, lock, forward);
-                               if (match(entry, data)) {
-                                       *target_entry = entry;
-                                       ret = 0;
-                                       goto exit;
-                               }
 
                                if (__cq_enqueue(cq, entry)) {
                                        ret = -1;
                                        goto exit;
                                }
+
                                cq_depth = __cq_get_elem_count(cq);
                                if (max_bfs_queue_depth < cq_depth)
                                        max_bfs_queue_depth = cq_depth;
                        }
                }
+
+               if (match(lock, data)) {
+                       *target_entry = lock;
+                       ret = 0;
+                       goto exit;
+               }
+
        }
 exit:
        return ret;
@@ -1610,9 +1611,9 @@ static int __bfs(struct lock_list *source_entry,
 static inline int __bfs_forwards(struct lock_list *src_entry,
                        void *data,
                        int (*match)(struct lock_list *entry, void *data),
-                       struct lock_list **target_entry)
+                       struct lock_list **target_entry, bool init)
 {
-       return __bfs(src_entry, data, match, target_entry, 1);
+       return __bfs(src_entry, data, match, target_entry, 1, init);
 }
 
 static inline int __bfs_backwards(struct lock_list *src_entry,
@@ -1620,7 +1621,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
                        int (*match)(struct lock_list *entry, void *data),
                        struct lock_list **target_entry)
 {
-       return __bfs(src_entry, data, match, target_entry, 0);
+       return __bfs(src_entry, data, match, target_entry, 0, true);
 }
 
 static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
@@ -1792,7 +1793,7 @@ static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
        unsigned long  count = 0;
        struct lock_list *uninitialized_var(target_entry);
 
-       __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
+       __bfs_forwards(this, (void *)&count, noop_count, &target_entry, true);
 
        return count;
 }
@@ -1846,12 +1847,12 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  */
 static noinline int
 check_path(struct lock_class *target, struct lock_list *src_entry,
-          struct lock_list **target_entry)
+          struct lock_list **target_entry, bool init)
 {
        int ret;
 
        ret = __bfs_forwards(src_entry, (void *)target, class_equal,
-                            target_entry);
+                            target_entry, init);
 
        if (unlikely(ret < 0))
                print_bfs_bug(ret);
@@ -1879,7 +1880,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 
        debug_atomic_inc(nr_cyclic_checks);
 
-       ret = check_path(hlock_class(target), &src_entry, &target_entry);
+       ret = check_path(hlock_class(target), &src_entry, &target_entry, true);
 
        if (unlikely(!ret)) {
                if (!trace->nr_entries) {
@@ -1940,7 +1941,7 @@ static inline int usage_match_backward(struct lock_list *entry, void *mask)
        debug_atomic_inc(nr_find_usage_forwards_checks);
 
        result = __bfs_forwards(root, &usage_mask, usage_match_forward,
-                               target_entry);
+                               target_entry, true);
 
        return result;
 }
-- 
1.8.3.1
