Remove the old protections that prevented softirqs from interrupting any
softirq-disabled context. Now that specific vectors can be disabled for
a given section of code, we want those sections to remain interruptible
by the other, still-enabled vectors.

Signed-off-by: Frederic Weisbecker <frede...@kernel.org>
Cc: Mauro Carvalho Chehab <mche...@s-opensource.com>
Cc: Joel Fernandes <j...@joelfernandes.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Pavan Kondeti <pkond...@codeaurora.org>
Cc: Paul E . McKenney <paul...@linux.vnet.ibm.com>
Cc: David S . Miller <da...@davemloft.net>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Sebastian Andrzej Siewior <bige...@linutronix.de>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
---
 kernel/softirq.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 4a32effbb1fc..42dfcdfa423b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -238,7 +238,7 @@ static void local_bh_enable_ip_mask(unsigned long ip, unsigned int cnt,
         */
        preempt_count_sub(cnt - 1);
 
-       if (unlikely(!in_interrupt() && softirq_pending_enabled())) {
+       if (unlikely(softirq_pending_enabled())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
@@ -388,7 +388,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable_no_softirq(SOFTIRQ_OFFSET);
-       WARN_ON_ONCE(in_interrupt());
+       WARN_ON_ONCE(in_irq());
        current_restore_flags(old_flags, PF_MEMALLOC);
 }
 
@@ -397,7 +397,7 @@ asmlinkage __visible void do_softirq(void)
        __u32 pending;
        unsigned long flags;
 
-       if (in_interrupt())
+       if (in_irq())
                return;
 
        local_irq_save(flags);
@@ -480,7 +480,7 @@ void irq_exit(void)
 #endif
        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
-       if (!in_interrupt() && softirq_pending_enabled())
+       if (!in_irq() && softirq_pending_enabled())
                invoke_softirq();
 
        tick_irq_exit();
@@ -504,7 +504,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
-       if (!in_interrupt())
+       if (!in_irq())
                wakeup_softirqd();
 }
 
-- 
2.17.1

Reply via email to