There is no need to process softirqs if none of the pending ones are enabled. Check that early to avoid unnecessary overhead.
Reviewed-by: David S. Miller <da...@davemloft.net> Signed-off-by: Frederic Weisbecker <frede...@kernel.org> Cc: Mauro Carvalho Chehab <mchehab+sams...@kernel.org> Cc: Joel Fernandes <j...@joelfernandes.org> Cc: Thomas Gleixner <t...@linutronix.de> Cc: Pavan Kondeti <pkond...@codeaurora.org> Cc: Paul E . McKenney <paul...@linux.vnet.ibm.com> Cc: David S . Miller <da...@davemloft.net> Cc: Ingo Molnar <mi...@kernel.org> Cc: Sebastian Andrzej Siewior <bige...@linutronix.de> Cc: Linus Torvalds <torva...@linux-foundation.org> Cc: Peter Zijlstra <pet...@infradead.org> --- include/linux/interrupt.h | 5 +++++ kernel/softirq.c | 21 +++++++++++---------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 1f4bd62ae218..2bb8b7fcfb15 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -510,6 +510,11 @@ static inline void softirq_pending_set_mask(unsigned int pending) } #endif /* local_softirq_data */ +static inline int softirq_pending_enabled(void) +{ + return local_softirq_pending() & local_softirq_enabled(); +} + /* map softirq index to softirq name. update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq. */ diff --git a/kernel/softirq.c b/kernel/softirq.c index e957615102dc..6c703bbb718b 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -183,7 +183,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) */ preempt_count_sub(cnt - 1); - if (unlikely(!in_interrupt() && local_softirq_pending())) { + if (unlikely(!in_interrupt() && softirq_pending_enabled())) { /* * Run softirq if any pending. And do it in its own stack * as we may be calling this deep in a task call stack already. 
@@ -252,7 +252,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) int max_restart = MAX_SOFTIRQ_RESTART; struct softirq_action *h; bool in_hardirq; - __u32 pending; + __u32 pending, enabled; int softirq_bit; /* @@ -262,7 +262,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) */ current->flags &= ~PF_MEMALLOC; - pending = local_softirq_pending(); + enabled = local_softirq_enabled(); + pending = local_softirq_pending() & enabled; account_irq_enter_time(current); __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET); @@ -270,7 +271,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) restart: /* Reset the pending bitmask before enabling irqs */ - softirq_pending_clear_mask(SOFTIRQ_ALL_MASK); + softirq_pending_clear_mask(pending); local_irq_enable(); @@ -307,7 +308,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) rcu_softirq_qs(); local_irq_disable(); - pending = local_softirq_pending(); + pending = local_softirq_pending() & enabled; if (pending) { if (time_before(jiffies, end) && !need_resched() && --max_restart) @@ -333,7 +334,7 @@ asmlinkage __visible void do_softirq(void) local_irq_save(flags); - pending = local_softirq_pending(); + pending = softirq_pending_enabled(); if (pending && !ksoftirqd_running(pending)) do_softirq_own_stack(); @@ -362,7 +363,7 @@ void irq_enter(void) static inline void invoke_softirq(void) { - if (ksoftirqd_running(local_softirq_pending())) + if (ksoftirqd_running(softirq_pending_enabled())) return; if (!force_irqthreads) { @@ -411,7 +412,7 @@ void irq_exit(void) #endif account_irq_exit_time(current); preempt_count_sub(HARDIRQ_OFFSET); - if (!in_interrupt() && local_softirq_pending()) + if (!in_interrupt() && softirq_pending_enabled()) invoke_softirq(); tick_irq_exit(); @@ -642,13 +643,13 @@ void __init softirq_init(void) static int ksoftirqd_should_run(unsigned int cpu) { - return local_softirq_pending(); + return softirq_pending_enabled(); } static void run_ksoftirqd(unsigned int 
cpu) { local_irq_disable(); - if (local_softirq_pending()) { + if (softirq_pending_enabled()) { /* * We can safely run softirq on inline stack, as we are not deep * in the task stack here. -- 2.21.0