Now that the verification check consistently passes, remove it. It
is still kept earlier in the patch series for illustration/testing purposes.

Signed-off-by: Joel Fernandes <[email protected]>
---
 kernel/rcu/tree.c | 20 --------------------
 1 file changed, 20 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 468388970c98..9d9d7c5ff3fc 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1900,26 +1900,6 @@ static noinline_for_stack bool rcu_gp_init(void)
                arch_spin_lock(&rcu_state.ofl_lock);
                raw_spin_lock_rcu_node(rnp);
                rcu_promote_blocked_tasks(rnp);
-#ifdef CONFIG_RCU_PER_CPU_BLOCKED_LISTS
-               /*
-                * Verify rdp lists consistent with rnp list. Since the unlock
-                * path removes from rdp before rnp, we can have tasks that are
-                * on rnp but not on rdp (in the middle of being removed).
-                * Therefore rnp_count >= rdp_total is the expected invariant.
-                */
-               rnp_count = 0;
-               rdp_total = 0;
-               list_for_each_entry(t_verify, &rnp->blkd_tasks, rcu_node_entry)
-                       rnp_count++;
-               for (cpu_verify = rnp->grplo; cpu_verify <= rnp->grphi; cpu_verify++) {
-                       rdp_cpu = per_cpu_ptr(&rcu_data, cpu_verify);
-                       raw_spin_lock(&rdp_cpu->blkd_lock);
-               list_for_each_entry(t_rdp, &rdp_cpu->blkd_list, rcu_rdp_entry)
-                               rdp_total++;
-                       raw_spin_unlock(&rdp_cpu->blkd_lock);
-               }
-               WARN_ON_ONCE(rnp_count < rdp_total);
-#endif
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
-- 
2.34.1


Reply via email to