Networking drivers very often have to replace one page with
another for their RX ring buffers.

A multi-queue NIC will severely hit a contention point
in dma-debug while grabbing the free_entries_lock spinlock.

Adding a one-entry per-cpu cache removes the need
to grab this spinlock twice per page replacement.
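
For illustration only (not part of this patch), the same one-slot
cache pattern can be sketched in userspace C, with a thread-local
slot in front of a mutex-protected free list; the names entry_cache,
entry_alloc() and entry_free() below are made up for this sketch and
do not correspond to the kernel code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        int data;
};

static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *free_list;                 /* slow path: shared, lock protected */
static _Thread_local struct entry *entry_cache; /* fast path: one slot per thread */

static struct entry *entry_alloc(void)
{
        struct entry *e = entry_cache;

        if (e) {                        /* fast path: reuse the cached entry, no lock */
                entry_cache = NULL;
                return e;
        }

        pthread_mutex_lock(&free_lock);
        e = free_list;
        if (e)
                free_list = e->next;
        pthread_mutex_unlock(&free_lock);

        if (!e)
                e = calloc(1, sizeof(*e));      /* grow the pool when it runs dry */
        return e;
}

static void entry_free(struct entry *e)
{
        if (!entry_cache) {             /* fast path: park the entry in the local slot */
                entry_cache = e;
                return;
        }

        pthread_mutex_lock(&free_lock);
        e->next = free_list;
        free_list = e;
        pthread_mutex_unlock(&free_lock);
}

int main(void)
{
        struct entry *a = entry_alloc();        /* slow path: allocates a fresh entry */

        entry_free(a);                          /* parks it in entry_cache */

        struct entry *b = entry_alloc();        /* fast path: no lock taken, b == a */

        printf("cache hit: %s\n", a == b ? "yes" : "no");
        entry_free(b);                          /* block is reclaimed at process exit */
        return 0;
}

The kernel version uses this_cpu_xchg()/this_cpu_cmpxchg() so the
slot swap stays correct even if the task is preempted or an interrupt
allocates an entry on the same CPU; the thread-local sketch above does
not need that protection.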

Tested on a 40Gbit mlx4 NIC, with 16 RX queues and about
1,000,000 replacements per second.

Signed-off-by: Eric Dumazet <eduma...@google.com>
Cc: Christoph Hellwig <h...@lst.de>
---
 kernel/dma/debug.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index a310dbb1515e92c081f8f3f9a7290dd5e53fc889..b7221426ef49cf640db5bcb261b0817d714a3033 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -97,6 +97,8 @@ static LIST_HEAD(free_entries);
 /* Lock for the list above */
 static DEFINE_SPINLOCK(free_entries_lock);
 
+static DEFINE_PER_CPU(struct dma_debug_entry *, dma_debug_entry_cache);
+
 /* Global disable flag - will be set in case of an error */
 static bool global_disable __read_mostly;
 
@@ -676,6 +678,10 @@ static struct dma_debug_entry *dma_entry_alloc(void)
        struct dma_debug_entry *entry;
        unsigned long flags;
 
+       entry = this_cpu_xchg(dma_debug_entry_cache, NULL);
+       if (entry)
+               goto end;
+
        spin_lock_irqsave(&free_entries_lock, flags);
        if (num_free_entries == 0) {
                if (dma_debug_create_entries(GFP_ATOMIC)) {
@@ -690,7 +696,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
        entry = __dma_entry_alloc();
 
        spin_unlock_irqrestore(&free_entries_lock, flags);
-
+end:
 #ifdef CONFIG_STACKTRACE
        entry->stack_len = stack_trace_save(entry->stack_entries,
                                            ARRAY_SIZE(entry->stack_entries),
@@ -705,6 +711,9 @@ static void dma_entry_free(struct dma_debug_entry *entry)
 
        active_cacheline_remove(entry);
 
+       if (!this_cpu_cmpxchg(dma_debug_entry_cache, NULL, entry))
+               return;
+
        /*
         * add to beginning of the list - this way the entries are
         * more likely cache hot when they are reallocated.
-- 
2.25.0.341.g760bfbb309-goog
