__do_softirq() currently serves all pending softirqs.
As softirqs need to be separated into groups, we have to be able to
serve the softirqs of one group while deferring those of another.
Change __do_softirq() to take a mask of the softirqs it should serve,
instead of servicing every pending softirq.

Signed-off-by: Dmitry Safonov <d...@arista.com>
---
 include/linux/interrupt.h |  8 ++++----
 kernel/softirq.c          | 27 ++++++++++++++-------------
 2 files changed, 18 insertions(+), 17 deletions(-)
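
Not part of the patch: a minimal userspace sketch of the masking logic
described in the commit message, for illustration only. The
serve_masked() helper, the fake_pending word and NR_FAKE_SOFTIRQS are
made up for the example; only the "pending & ~mask" / "pending &= mask"
split mirrors the hunk in __do_softirq() below.

#include <stdio.h>
#include <stdint.h>

#define NR_FAKE_SOFTIRQS 10

static uint32_t fake_pending = 0x3f;	/* pretend bits 0-5 were raised */

static void serve_masked(uint32_t mask)
{
	uint32_t pending = fake_pending;

	/* Keep softirqs outside the mask marked pending for a later pass... */
	fake_pending = pending & ~mask;
	/* ...and serve only the ones the caller asked for. */
	pending &= mask;

	for (int nr = 0; nr < NR_FAKE_SOFTIRQS; nr++)
		if (pending & (1u << nr))
			printf("serving softirq %d\n", nr);
}

int main(void)
{
	serve_masked(0x0f);	/* serve bits 0-3, defer bits 4-5 */
	printf("still pending: 0x%x\n", fake_pending);
	return 0;
}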

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5bb6b435f0bb..2ea09896bd6e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -490,14 +490,14 @@ struct softirq_action
 };
 
 asmlinkage void do_softirq(void);
-asmlinkage void __do_softirq(void);
+asmlinkage void __do_softirq(__u32 mask);
 
 #ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
+void do_softirq_own_stack(__u32 mask);
 #else
-static inline void do_softirq_own_stack(void)
+static inline void do_softirq_own_stack(__u32 mask)
 {
-       __do_softirq();
+       __do_softirq(mask);
 }
 #endif
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c9aecdd57107..ca8c3db4570d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -240,7 +240,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
-asmlinkage __visible void __softirq_entry __do_softirq(void)
+asmlinkage __visible void __softirq_entry __do_softirq(__u32 mask)
 {
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
@@ -265,7 +265,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 
 restart:
        /* Reset the pending bitmask before enabling irqs */
-       set_softirq_pending(0);
+       set_softirq_pending(pending & ~mask);
+       pending &= mask;
 
        local_irq_enable();
 
@@ -299,7 +300,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
        local_irq_disable();
 
        pending = local_softirq_pending();
-       if (pending) {
+       if (pending & mask) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;
@@ -316,18 +317,16 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 
 asmlinkage __visible void do_softirq(void)
 {
-       __u32 pending;
+       __u32 pending = local_softirq_pending();
        unsigned long flags;
 
-       if (in_interrupt())
+       if (in_interrupt() || !pending)
                return;
 
        local_irq_save(flags);
 
-       pending = local_softirq_pending();
-
-       if (pending && !ksoftirqd_running())
-               do_softirq_own_stack();
+       if (!ksoftirqd_running())
+               do_softirq_own_stack(pending);
 
        local_irq_restore(flags);
 }
@@ -353,7 +352,9 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (ksoftirqd_running())
+       __u32 pending = local_softirq_pending();
+
+       if (!pending || ksoftirqd_running())
                return;
 
        if (!force_irqthreads) {
@@ -363,14 +364,14 @@ static inline void invoke_softirq(void)
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
-               __do_softirq();
+               __do_softirq(pending);
 #else
                /*
                 * Otherwise, irq_exit() is called on the task stack that can
                 * be potentially deep already. So call softirq in its own stack
                 * to prevent from any overrun.
                 */
-               do_softirq_own_stack();
+               do_softirq_own_stack(pending);
 #endif
        } else {
                wakeup_softirqd();
@@ -679,7 +680,7 @@ static void run_ksoftirqd(unsigned int cpu)
                 * We can safely run softirq on inline stack, as we are not deep
                 * in the task stack here.
                 */
-               __do_softirq();
+               __do_softirq(~0);
                local_irq_enable();
                cond_resched_rcu_qs();
                return;
-- 
2.13.6
