The indirect dependency redundancy check was added for the cross-release
feature, which has since been reverted. Then, as suggested by Peter, the
check was made to take effect only when CONFIG_LOCKDEP_SMALL is set.

With (recursive) read-write lock types considered in the dependency graph,
the indirect dependency redundancy check would be quite complicated to
implement. Let's remove it for good. This inevitably increases the number
of dependencies, but after combining forward and backward dependencies, the
increase will be offset.

Signed-off-by: Yuyang Du <duyuy...@gmail.com>
---
 kernel/locking/lockdep.c           | 41 --------------------------------------
 kernel/locking/lockdep_internals.h |  1 -
 kernel/locking/lockdep_proc.c      |  2 --
 3 files changed, 44 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a0e62e5..4838c99 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1812,38 +1812,6 @@ unsigned long lockdep_count_backward_deps(struct 
lock_class *class)
        return ret;
 }
 
-#ifdef CONFIG_LOCKDEP_SMALL
-/*
- * Check that the dependency graph starting at <src> can lead to
- * <target> or not. If it can, <src> -> <target> dependency is already
- * in the graph.
- *
- * Print an error and return 2 if it does or 1 if it does not.
- */
-static noinline int
-check_redundant(struct held_lock *src, struct held_lock *target)
-{
-       int ret;
-       struct lock_list *uninitialized_var(target_entry);
-       struct lock_list src_entry = {
-               .class = hlock_class(src),
-               .parent = NULL,
-       };
-
-       debug_atomic_inc(nr_redundant_checks);
-
-       ret = check_path(hlock_class(target), &src_entry, &target_entry);
-
-       if (!ret) {
-               debug_atomic_inc(nr_redundant);
-               ret = 2;
-       } else if (ret < 0)
-               ret = 0;
-
-       return ret;
-}
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 
 static inline int usage_accumulate(struct lock_list *entry, void *mask)
@@ -2507,15 +2475,6 @@ static inline void inc_chains(void)
                }
        }
 
-#ifdef CONFIG_LOCKDEP_SMALL
-       /*
-        * Is the <prev> -> <next> link redundant?
-        */
-       ret = check_redundant(prev, next);
-       if (ret != 1)
-               return ret;
-#endif
-
        if (!*trace) {
                *trace = save_trace();
                if (!*trace)
diff --git a/kernel/locking/lockdep_internals.h 
b/kernel/locking/lockdep_internals.h
index 18d85ae..f499426 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -177,7 +177,6 @@ struct lockdep_stats {
        unsigned long  redundant_softirqs_on;
        unsigned long  redundant_softirqs_off;
        int            nr_unused_locks;
-       unsigned int   nr_redundant_checks;
        unsigned int   nr_redundant;
        unsigned int   nr_cyclic_checks;
        unsigned int   nr_find_usage_forwards_checks;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dadb7b7..edc4a7b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -178,8 +178,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
                debug_atomic_read(chain_lookup_hits));
        seq_printf(m, " cyclic checks:                 %11llu\n",
                debug_atomic_read(nr_cyclic_checks));
-       seq_printf(m, " redundant checks:              %11llu\n",
-               debug_atomic_read(nr_redundant_checks));
        seq_printf(m, " redundant links:               %11llu\n",
                debug_atomic_read(nr_redundant));
        seq_printf(m, " find-mask forwards checks:     %11llu\n",
-- 
1.8.3.1

Reply via email to