The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=7f46deccbed74436b62f8fd02655ff4ad89f1023

commit 7f46deccbed74436b62f8fd02655ff4ad89f1023
Author:     Alan Cox <[email protected]>
AuthorDate: 2022-07-31 19:28:30 +0000
Commit:     Alan Cox <[email protected]>
CommitDate: 2022-08-06 18:05:58 +0000

    x86/iommu: Reduce the number of queued invalidation interrupts
    
    Restructure dmar_qi_task() so as to reduce the number of invalidation
    completion interrupts.  Specifically, because processing completed
    invalidations in dmar_qi_task() can take quite some time, don't reenable
    completion interrupts until processing has completed a first time. Then,
    check a second time after reenabling completion interrupts, so that
    any invalidations that complete just before interrupts are reenabled
    do not linger until a future invalidation might raise an interrupt.
    (Recent changes have made checking for completed invalidations cheap; no
    locking is required.)
    
    Reviewed by:    kib
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D36054
---
 sys/x86/iommu/intel_qi.c | 45 +++++++++++++++++++++++++++++----------------
 1 file changed, 29 insertions(+), 16 deletions(-)

diff --git a/sys/x86/iommu/intel_qi.c b/sys/x86/iommu/intel_qi.c
index baaf5b472a2c..8a8e656083e3 100644
--- a/sys/x86/iommu/intel_qi.c
+++ b/sys/x86/iommu/intel_qi.c
@@ -411,14 +411,34 @@ dmar_qi_intr(void *arg)
        return (FILTER_HANDLED);
 }
 
+static void
+dmar_qi_drain_tlb_flush(struct dmar_unit *unit)
+{
+       struct iommu_map_entry *entry, *head;
+
+       for (head = unit->tlb_flush_head;; head = entry) {
+               entry = (struct iommu_map_entry *)
+                   atomic_load_acq_ptr((uintptr_t *)&head->tlb_flush_next);
+               if (entry == NULL ||
+                   !dmar_qi_seq_processed(unit, &entry->gseq))
+                       break;
+               unit->tlb_flush_head = entry;
+               iommu_gas_free_entry(head);
+               if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
+                       iommu_gas_free_region(entry);
+               else
+                       iommu_gas_free_space(entry);
+       }
+}
+
 static void
 dmar_qi_task(void *arg, int pending __unused)
 {
        struct dmar_unit *unit;
-       struct iommu_map_entry *entry, *head;
        uint32_t ics;
 
        unit = arg;
+       dmar_qi_drain_tlb_flush(unit);
 
        /*
         * Request an interrupt on the completion of the next invalidation
@@ -428,23 +448,16 @@ dmar_qi_task(void *arg, int pending __unused)
        if ((ics & DMAR_ICS_IWC) != 0) {
                ics = DMAR_ICS_IWC;
                dmar_write4(unit, DMAR_ICS_REG, ics);
-       }
 
-       for (;;) {
-               head = unit->tlb_flush_head;
-               entry = (struct iommu_map_entry *)
-                   atomic_load_acq_ptr((uintptr_t *)&head->tlb_flush_next);
-               if (entry == NULL)
-                       break;
-               if (!dmar_qi_seq_processed(unit, &entry->gseq))
-                       break;
-               unit->tlb_flush_head = entry;
-               iommu_gas_free_entry(head);
-               if ((entry->flags & IOMMU_MAP_ENTRY_RMRR) != 0)
-                       iommu_gas_free_region(entry);
-               else
-                       iommu_gas_free_space(entry);
+               /*
+                * Drain a second time in case the DMAR processes an entry
+                * after the first call and before clearing DMAR_ICS_IWC.
+                * Otherwise, such entries will linger until a later entry
+                * that requests an interrupt is processed.
+                */
+               dmar_qi_drain_tlb_flush(unit);
        }
+
        if (unit->inv_seq_waiters > 0) {
                /*
                 * Acquire the DMAR lock so that wakeup() is called only after
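
For readers following the commit message rather than the diff, below is a small
standalone sketch (not FreeBSD code) of the "drain, re-enable the interrupt,
drain again" ordering that the change applies to dmar_qi_task().  The names
here (drain_completions, hw_completed, intr_enabled) are made-up stand-ins for
dmar_qi_drain_tlb_flush(), the hardware's completed-invalidation state, and the
DMAR_ICS_IWC handling; the point is only the ordering, in which the second
drain picks up completions that land between the first drain and re-enabling
the interrupt so they do not linger until a later interrupt-raising
invalidation.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint hw_completed;	/* stand-in for the hardware completion count */
static unsigned sw_processed;		/* completions already handled by the task */
static atomic_int intr_enabled;		/* stand-in for the DMAR_ICS_IWC unmask step */

/* Handle every completion the "hardware" has finished so far. */
static void
drain_completions(void)
{
	unsigned done;

	done = atomic_load_explicit(&hw_completed, memory_order_acquire);
	while (sw_processed < done) {
		sw_processed++;
		printf("processed completion %u\n", sw_processed);
	}
}

/* Task body mirroring the ordering dmar_qi_task() now uses. */
static void
completion_task(void)
{
	/* First drain: may take a while; the interrupt stays masked meanwhile. */
	drain_completions();

	/* Re-enable the completion interrupt. */
	atomic_store_explicit(&intr_enabled, 1, memory_order_release);

	/*
	 * Second drain: catch anything that completed after the first drain
	 * but before the interrupt was re-enabled.
	 */
	drain_completions();
}

int
main(void)
{
	atomic_store(&hw_completed, 3);	/* pretend three invalidations finished */
	completion_task();
	return (0);
}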
