The current softirq_count() layout is designed as follows:

* Serving softirqs is done under SOFTIRQ_OFFSET. This makes
  softirq_count() odd and, since softirq serving is not re-entrant and
  therefore can't nest, that is enough to differentiate it from softirq
  disablement, which uses even values.

* Disabling softirqs is done under SOFTIRQ_OFFSET * 2. This can nest,
  so incrementing by even values is enough to differentiate it from
  serving softirqs.

Now the design is going to change:

* Serving softirqs will need to be re-entrant to allow one vector to
  interrupt another.

* Disabling softirqs can't nest anymore at the softirq_count() level.
  This is now driven entirely by the vector disabled mask.

In order to support this new layout, simply swap them: serving softirqs
now uses even value increments and disabling softirqs now toggles the
odd value.

Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Sebastian Andrzej Siewior <bige...@linutronix.de>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: David S. Miller <da...@davemloft.net>
Cc: Mauro Carvalho Chehab <mche...@s-opensource.com>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
---
 include/linux/bottom_half.h |  6 +++---
 include/linux/preempt.h     |  9 +++++----
 kernel/softirq.c            | 21 +++++++++++----------
 kernel/trace/ring_buffer.c  |  2 +-
 kernel/trace/trace.c        |  2 +-
 5 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index f8a68c8..74c986a 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -49,7 +49,7 @@ static __always_inline unsigned int __local_bh_disable_ip(unsigned long ip, unsi
 
 static inline unsigned int local_bh_disable(unsigned int mask)
 {
-       return __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET, mask);
+       return __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_OFFSET, mask);
 }
 
 extern void local_bh_enable_no_softirq(unsigned int bh);
@@ -58,12 +58,12 @@ extern void __local_bh_enable_ip(unsigned long ip,
 
 static inline void local_bh_enable_ip(unsigned long ip, unsigned int bh)
 {
-       __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET, bh);
+       __local_bh_enable_ip(ip, SOFTIRQ_OFFSET, bh);
 }
 
 static inline void local_bh_enable(unsigned int bh)
 {
-       __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET, bh);
+       __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_OFFSET, bh);
 }
 
 extern void local_bh_disable_all(void);
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index cf3fc3c..c4d9672 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -51,7 +51,8 @@
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET     (1UL << NMI_SHIFT)
 
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#define SOFTIRQ_SERVING_OFFSET (2 * SOFTIRQ_OFFSET)
+#define SOFTIRQ_SERVING_MASK (SOFTIRQ_MASK & ~SOFTIRQ_OFFSET)
 
 /* We use the MSB mostly because its available */
 #define PREEMPT_NEED_RESCHED   0x80000000
@@ -101,10 +102,10 @@
 #define in_irq()               (hardirq_count())
 #define in_softirq()           (softirq_count())
 #define in_interrupt()         (irq_count())
-#define in_serving_softirq()   (softirq_count() & SOFTIRQ_OFFSET)
+#define in_serving_softirq()   (softirq_count() & ~SOFTIRQ_OFFSET)
 #define in_nmi()               (preempt_count() & NMI_MASK)
 #define in_task()              (!(preempt_count() & \
-                                  (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+                                  (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_SERVING_MASK)))
 
 /*
  * The preempt_count offset after preempt_disable();
@@ -133,7 +134,7 @@
  *
  * Work as expected.
  */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_OFFSET + PREEMPT_LOCK_OFFSET)
 
 /*
  * Are we running in atomic context?  WARNING: this macro cannot
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 84da16c..3efa59e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -163,13 +163,13 @@ void local_bh_enable_no_softirq(unsigned int bh)
        if (bh != SOFTIRQ_ALL_MASK)
                return;
                        
-       if (preempt_count() == SOFTIRQ_DISABLE_OFFSET)
+       if (preempt_count() == SOFTIRQ_OFFSET)
                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 
-       if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
+       if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(_RET_IP_);
 
-       __preempt_count_sub(SOFTIRQ_DISABLE_OFFSET);
+       __preempt_count_sub(SOFTIRQ_OFFSET);
 }
 EXPORT_SYMBOL(local_bh_enable_no_softirq);
 
@@ -181,9 +181,10 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt, unsigned int bh)
        local_irq_disable();
 #endif
        softirq_enabled_set(bh);
+
        if (bh != SOFTIRQ_ALL_MASK) {
                cnt &= ~SOFTIRQ_MASK;
-       } else if (!(softirq_count() & SOFTIRQ_OFFSET)) {
+       } else if (!(softirq_count() & SOFTIRQ_SERVING_MASK)) {
                /* Are softirqs going to be turned on now: */
                trace_softirqs_on(ip);
        }
@@ -235,15 +236,15 @@ static void local_bh_enter(unsigned long ip)
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
-       __preempt_count_add(SOFTIRQ_OFFSET);
+       __preempt_count_add(SOFTIRQ_SERVING_OFFSET);
        /*
         * Were softirqs turned off above:
         */
-       if (softirq_count() == SOFTIRQ_OFFSET)
+       if (softirq_count() == SOFTIRQ_SERVING_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
 
-       if (preempt_count() == SOFTIRQ_OFFSET) {
+       if (preempt_count() == SOFTIRQ_SERVING_OFFSET) {
 #ifdef CONFIG_DEBUG_PREEMPT
                current->preempt_disable_ip = get_lock_parent_ip();
 #endif
@@ -255,13 +256,13 @@ static void local_bh_exit(void)
 {
        lockdep_assert_irqs_disabled();
 
-       if (preempt_count() == SOFTIRQ_OFFSET)
+       if (preempt_count() == SOFTIRQ_SERVING_OFFSET)
                trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 
-       if (softirq_count() == SOFTIRQ_OFFSET)
+       if (softirq_count() == SOFTIRQ_SERVING_OFFSET)
                trace_softirqs_on(_RET_IP_);
 
-       __preempt_count_sub(SOFTIRQ_OFFSET);
+       __preempt_count_sub(SOFTIRQ_SERVING_OFFSET);
 }
 
 /*
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 65bd461..0fedc5c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2655,7 +2655,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
        unsigned long pc = preempt_count();
        int bit;
 
-       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+       if (in_task())
                bit = RB_CTX_NORMAL;
        else
                bit = pc & NMI_MASK ? RB_CTX_NMI :
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bf6f1d7..af1abd6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2143,7 +2143,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #endif
                ((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
-               ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+               ((pc & SOFTIRQ_SERVING_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
-- 
2.7.4
