The check for irqs_were_disabled is redundant in
rcu_unlock_needs_exp_handling() as the caller already checks for this.
This includes the boost case as well. Just remove the redundant check.

This is a first win for the refactor of the needs_exp (formerly
expboost) condition into a new rcu_unlock_needs_exp_handling() function,
as the conditions become easier to read.

Signed-off-by: Joel Fernandes <[email protected]>
---
 kernel/rcu/tree_plugin.h | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e20c17163c13..7d03d81e45f6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -659,14 +659,12 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
  * @t: The task being checked
  * @rdp: The per-CPU RCU data
  * @rnp: The RCU node for this CPU
- * @irqs_were_disabled: Whether interrupts were disabled before rcu_read_unlock()
  *
  * Returns true if expedited processing of the rcu_read_unlock() is needed.
  */
 static bool rcu_unlock_needs_exp_handling(struct task_struct *t,
                                      struct rcu_data *rdp,
-                                     struct rcu_node *rnp,
-                                     bool irqs_were_disabled)
+                                     struct rcu_node *rnp)
 {
        /*
         * Check if this task is blocking an expedited grace period. If the
@@ -702,14 +700,14 @@ static bool rcu_unlock_needs_exp_handling(struct task_struct *t,
 
        /*
         * RCU priority boosting case: If a task is subject to RCU priority
-        * boosting and exits an RCU read-side critical section with interrupts
-        * disabled, we need expedited handling to ensure timely deboosting.
-        * Without this, a low-priority task could incorrectly run at high
-        * real-time priority for an extended period degrading real-time
+        * boosting and exits an RCU read-side critical section, we need
+        * expedited handling to ensure timely deboosting. Without this,
+        * a low-priority task could incorrectly run at high real-time
+        * priority for an extended period degrading real-time
         * responsiveness. This applies to all CONFIG_RCU_BOOST=y kernels,
         * not just to PREEMPT_RT.
         */
-       if (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled && t->rcu_blocked_node)
+       if (IS_ENABLED(CONFIG_RCU_BOOST) && t->rcu_blocked_node)
                return true;
 
        return false;
@@ -738,7 +736,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
                struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
                struct rcu_node *rnp = rdp->mynode;
 
-               needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp, irqs_were_disabled);
+               needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp);
        
                // Need to defer quiescent state until everything is enabled.
                if (use_softirq && (in_hardirq() || (needs_exp && !irqs_were_disabled))) {
-- 
2.34.1


Reply via email to