Build a reverse map from softirq group number to the mask of softirqs in that
group, so the pending mask can be filtered for a group with a single AND:

  pending &= group_to_softirqs[group_nr];
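
A minimal, self-contained sketch of the idea (ordinary userspace C, not the
kernel code; NR_SOFTIRQS, NR_GROUPS, the group assignments and the pending
value below are invented purely for illustration):

/*
 * Standalone illustration only: names mirror the patch, but all values
 * here are made up.  The kernel code reads softirq_vec[i].group_mask.
 */
#include <stdio.h>
#include <strings.h>                    /* ffs() */

#define NR_SOFTIRQS     10
#define NR_GROUPS       32              /* one per possible group_mask bit */

static unsigned group_mask[NR_SOFTIRQS];        /* softirq -> its groups */
static unsigned group_to_softirqs[NR_GROUPS];   /* group -> its softirqs */

static void fill_group_to_softirq_maps(void)
{
        unsigned i;

        for (i = 0; i < NR_SOFTIRQS; i++) {
                unsigned mask = group_mask[i];
                unsigned j, group = 0;

                /* walk every set bit (group) in this softirq's mask */
                while ((j = ffs(mask))) {
                        group += j;
                        group_to_softirqs[group - 1] |= 1u << i;
                        mask >>= j;
                }
        }
}

int main(void)
{
        unsigned pending = 0x3ffu;      /* pretend every softirq is raised */

        group_mask[0] = 0x1;            /* softirq 0 -> group 0 */
        group_mask[1] = 0x2;            /* softirq 1 -> group 1 */
        group_mask[2] = 0x2;            /* softirq 2 -> group 1 as well */
        fill_group_to_softirq_maps();

        /* serve only the softirqs belonging to group 1 */
        pending &= group_to_softirqs[1];
        printf("pending for group 1: %#x\n", pending);  /* prints 0x6 */
        return 0;
}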

Signed-off-by: Dmitry Safonov <d...@arista.com>
---
 kernel/softirq.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index ca8c3db4570d..7de5791c08f9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -54,6 +54,7 @@ EXPORT_SYMBOL(irq_stat);
 #endif
 
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
+static unsigned group_to_softirqs[sizeof(softirq_vec[0].group_mask) * BITS_PER_BYTE] __cacheline_aligned_in_smp;
 static unsigned __initdata nr_softirq_groups = 0;
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
@@ -650,11 +651,28 @@ static void __init setup_default_softirq_group(unsigned nr)
        }
 }
 
+static void __init fill_group_to_softirq_maps(void)
+{
+       unsigned i;
+
+       for (i = 0; i < NR_SOFTIRQS; i++) {
+               u32 mask = softirq_vec[i].group_mask;
+               unsigned j, group = 0;
+
+               while ((j = ffs(mask))) {
+                       group += j;
+                       group_to_softirqs[group - 1] |= (1 << i);
+                       mask >>= j;
+               }
+       }
+}
+
 void __init softirq_init(void)
 {
        int cpu;
 
        setup_default_softirq_group(nr_softirq_groups++);
+       fill_group_to_softirq_maps();
 
        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
-- 
2.13.6
