The following commit has been merged into the sched/core branch of tip:

Commit-ID:     52103be07d8b08311955f8c30e535c2dda290cf4
Gitweb:        https://git.kernel.org/tip/52103be07d8b08311955f8c30e535c2dda290cf4
Author:        Peter Zijlstra <pet...@infradead.org>
AuthorDate:    Tue, 26 May 2020 18:10:59 +02:00
Committer:     Ingo Molnar <mi...@kernel.org>
CommitterDate: Thu, 28 May 2020 10:54:15 +02:00

smp: Optimize flush_smp_call_function_queue()

The call_single_queue can contain two different kinds of callbacks,
synchronous and asynchronous. The current interrupt handler runs them
in order, which means that remote CPUs waiting for their synchronous
call can be delayed by asynchronous callbacks queued ahead of them.

Rework the interrupt handler to run the synchronous callbacks first.
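
For context: a CPU that issued a synchronous call spins in
csd_lock_wait() until the handler unlocks its csd, so every
asynchronous callback that runs before that unlock directly extends
the remote spin. A simplified sketch of the handshake, paraphrasing
(not quoting) the kernel/smp.c of this era:

	/* Waiter side: spin until the handler releases the lock flag. */
	static void csd_lock_wait(call_single_data_t *csd)
	{
		smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
	}

	/* Handler side: csd_unlock() releases the waiter. */
	static void csd_unlock(call_single_data_t *csd)
	{
		smp_store_release(&csd->flags, 0);
	}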

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Link: https://lore.kernel.org/r/20200526161907.836818...@infradead.org
---
 kernel/smp.c | 27 +++++++++++++++++++++++----
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/kernel/smp.c b/kernel/smp.c
index 786092a..db2f738 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -209,9 +209,9 @@ void generic_smp_call_function_single_interrupt(void)
  */
 static void flush_smp_call_function_queue(bool warn_cpu_offline)
 {
-       struct llist_head *head;
-       struct llist_node *entry;
        call_single_data_t *csd, *csd_next;
+       struct llist_node *entry, *prev;
+       struct llist_head *head;
        static bool warned;
 
        lockdep_assert_irqs_disabled();
@@ -235,21 +235,40 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
                                csd->func);
        }
 
+       /*
+        * First; run all SYNC callbacks, people are waiting for us.
+        */
+       prev = NULL;
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;
 
                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+                       if (prev) {
+                               prev->next = &csd_next->llist;
+                       } else {
+                               entry = &csd_next->llist;
+                       }
                        func(info);
                        csd_unlock(csd);
                } else {
-                       csd_unlock(csd);
-                       func(info);
+                       prev = &csd->llist;
                }
        }
 
        /*
+        * Second; run all !SYNC callbacks.
+        */
+       llist_for_each_entry_safe(csd, csd_next, entry, llist) {
+               smp_call_func_t func = csd->func;
+               void *info = csd->info;
+
+               csd_unlock(csd);
+               func(info);
+       }
+
+       /*
         * Handle irq works queued remotely by irq_work_queue_on().
         * Smp functions above are typically synchronous so they
         * better run first since some other CPUs may be busy waiting
