The ->dynticks special-bits mechanism, namely rcu_eqs_special_set() and the
RCU_DYNTICK_CTRL_{MASK,CTR} bits at the bottom of the ->dynticks counter,
no longer has any users, so remove it. The revert was straightforward.
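
With the special bits gone, ->dynticks becomes a simple counter again: an
odd value means RCU is watching the CPU, an even value means the CPU is in
an extended quiescent state. A minimal sketch of the resulting protocol
(illustrative only, not part of this patch):

	/* Sketch of the ->dynticks protocol after this patch. */
	atomic_t dynticks = ATOMIC_INIT(1);	/* Odd: RCU is watching. */
	int seq;

	/* Enter an extended quiescent state: counter becomes even. */
	seq = atomic_inc_return(&dynticks);
	WARN_ON_ONCE(seq & 0x1);

	/* Exit the extended quiescent state: counter becomes odd. */
	seq = atomic_inc_return(&dynticks);
	WARN_ON_ONCE(!(seq & 0x1));

Samplers such as rcu_dynticks_snap() can still take a fully ordered
snapshot with atomic_add_return(0, ...) and need only test the bottom bit.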

Tested with rcutorture on all TREE configurations.
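
For reference, one way to exercise those configurations is via the in-tree
rcutorture scripts; the invocation below is illustrative and the exact
flags and config list may differ by tree:

	tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --configs "TREE01 TREE02 TREE03"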

Link: http://lore.kernel.org/r/CALCETrWNPOOdTrFabTDd=h7+wc6xj9rjceg6ol1s0rtv5pf...@mail.gmail.com
Suggested-by: Andy Lutomirski <l...@kernel.org>
Signed-off-by: Joel Fernandes (Google) <j...@joelfernandes.org>
---
 include/linux/rcutiny.h |  3 --
 kernel/rcu/tree.c       | 81 ++++++++------------------------
 2 files changed, 17 insertions(+), 67 deletions(-)

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3465ba704a111..dbcddc7b26b94 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -14,9 +14,6 @@
 
 #include <asm/param.h> /* for HZ */
 
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
-
 static inline unsigned long get_state_synchronize_rcu(void)
 {
        return 0;
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 6d39485f7f517..1ec7b1d4a03c4 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -82,20 +82,11 @@
 
 /* Data structures. */
 
-/*
- * Steal a bit from the bottom of ->dynticks for idle entry/exit
- * control.  Initially this is for TLB flushing.
- */
-#define RCU_DYNTICK_CTRL_MASK 0x1
-#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
-#ifndef rcu_eqs_special_exit
-#define rcu_eqs_special_exit() do { } while (0)
-#endif
-
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .dynticks_nesting = 1,
        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
-       .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
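+       /* An odd value means that RCU is watching this CPU. */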
+       .dynticks = ATOMIC_INIT(1),
 };
 static struct rcu_state rcu_state = {
        .level = { &rcu_state.node[0] },
@@ -245,21 +235,17 @@ void rcu_softirq_qs(void)
 static void rcu_dynticks_eqs_enter(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
-        * CPUs seeing atomic_add_return() must see prior RCU read-side
+        * CPUs seeing atomic_inc_return() must see prior RCU read-side
         * critical sections, and we also must force ordering with the
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = atomic_inc_return(&rdp->dynticks);
        // RCU is no longer watching.  Better be in extended quiescent state!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_CTR));
-       /* Better not have special action (TLB flush) pending! */
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    (seq & RCU_DYNTICK_CTRL_MASK));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & 0x1));
 }
 
 /*
@@ -270,24 +257,17 @@ static void rcu_dynticks_eqs_enter(void)
 static void rcu_dynticks_eqs_exit(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        int seq;
 
        /*
-        * CPUs seeing atomic_add_return() must see prior idle sojourns,
+        * CPUs seeing atomic_inc_return() must see prior idle sojourns,
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = atomic_inc_return(&rdp->dynticks);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
-       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
-                    !(seq & RCU_DYNTICK_CTRL_CTR));
-       if (seq & RCU_DYNTICK_CTRL_MASK) {
-               atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
-               smp_mb__after_atomic(); /* _exit after clearing mask. */
-               /* Prefer duplicate flushes to losing a flush. */
-               rcu_eqs_special_exit();
-       }
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & 0x1));
 }
 
 /*
@@ -304,9 +285,9 @@ static void rcu_dynticks_eqs_online(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
+       if (atomic_read(&rdp->dynticks) & 0x1)
                return;
-       atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       atomic_inc(&rdp->dynticks);
 }
 
 /*
@@ -318,7 +299,7 @@ static bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+       return !(atomic_read(&rdp->dynticks) & 0x1);
 }
 
 /*
@@ -329,7 +310,7 @@ static int rcu_dynticks_snap(struct rcu_data *rdp)
 {
        int snap = atomic_add_return(0, &rdp->dynticks);
 
-       return snap & ~RCU_DYNTICK_CTRL_MASK;
+       return snap;
 }
 
 /*
@@ -338,7 +319,7 @@ static int rcu_dynticks_snap(struct rcu_data *rdp)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-       return !(snap & RCU_DYNTICK_CTRL_CTR);
+       return !(snap & 0x1);
 }
 
 /*
@@ -361,8 +342,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        int snap;
 
        // If not quiescent, force back to earlier extended quiescent state.
-       snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
-                                              RCU_DYNTICK_CTRL_CTR);
+       snap = atomic_read(&rdp->dynticks) & ~0x1;
 
        smp_rmb(); // Order ->dynticks and *vp reads.
        if (READ_ONCE(*vp))
@@ -370,32 +350,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
        smp_rmb(); // Order *vp read and ->dynticks re-read.
 
        // If still in the same extended quiescent state, we are good!
-       return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
-}
-
-/*
- * Set the special (bottom) bit of the specified CPU so that it
- * will take special action (such as flushing its TLB) on the
- * next exit from an extended quiescent state.  Returns true if
- * the bit was successfully set, or false if the CPU was not in
- * an extended quiescent state.
- */
-bool rcu_eqs_special_set(int cpu)
-{
-       int old;
-       int new;
-       int new_old;
-       struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-
-       new_old = atomic_read(&rdp->dynticks);
-       do {
-               old = new_old;
-               if (old & RCU_DYNTICK_CTRL_CTR)
-                       return false;
-               new = old | RCU_DYNTICK_CTRL_MASK;
-               new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
-       } while (new_old != old);
-       return true;
+       return snap == atomic_read(&rdp->dynticks);
 }
 
 /*
@@ -411,13 +366,12 @@ bool rcu_eqs_special_set(int cpu)
  */
 void rcu_momentary_dyntick_idle(void)
 {
        int special;

        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                   &this_cpu_ptr(&rcu_data)->dynticks);
+       special = atomic_add_return(2, &this_cpu_ptr(&rcu_data)->dynticks);
        /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
+       WARN_ON_ONCE(!(special & 0x1));
        rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
-- 
2.26.2.526.g744177e7f7-goog
