Currently GART writes one page entry at a time and flushes the bus buffer
after each update. It is more efficient to aggregate the entry writes and
flush the bus buffer once at the end: this gives map/unmap a 10-40%
performance boost (depending on the size of the mapping) compared to
flushing after each entry update.
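
A minimal user-space C sketch of the pattern (the names pte_shadow,
flush_count, set_pte and sync_flush are made up for illustration and are
not the driver's API): entry writes are batched and the expensive flush
happens once, from the .iotlb_sync / .iotlb_sync_map callbacks, instead
of after every write.

    #include <stdio.h>

    #define NR_PAGES 64

    static unsigned long pte_shadow[NR_PAGES]; /* stand-in for the GART registers */
    static unsigned int flush_count;           /* counts expensive bus-buffer flushes */

    /* Write one page table entry; no flush here anymore. */
    static void set_pte(unsigned long iova, unsigned long pte)
    {
            pte_shadow[iova] = pte;
    }

    /* One flush for the whole batch, as .iotlb_sync(_map) would do. */
    static void sync_flush(void)
    {
            flush_count++;
    }

    int main(void)
    {
            unsigned long iova;

            for (iova = 0; iova < NR_PAGES; iova++)
                    set_pte(iova, iova | 1);   /* batch the entry writes */
            sync_flush();                      /* single flush at the end */

            printf("flushes: %u (vs %d with a flush per entry)\n",
                   flush_count, NR_PAGES);
            return 0;
    }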

Signed-off-by: Dmitry Osipenko <[email protected]>
---
 drivers/iommu/tegra-gart.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index ebc105c201bd..26d8735d26e8 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -226,7 +226,6 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
                }
        }
        gart_set_pte(gart, iova, GART_PTE(pfn));
-       FLUSH_GART_REGS(gart);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return 0;
 }
@@ -243,7 +242,6 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 
        spin_lock_irqsave(&gart->pte_lock, flags);
        gart_set_pte(gart, iova, 0);
-       FLUSH_GART_REGS(gart);
        spin_unlock_irqrestore(&gart->pte_lock, flags);
        return bytes;
 }
@@ -319,6 +317,14 @@ static int gart_iommu_of_xlate(struct device *dev,
        return 0;
 }
 
+static void gart_iommu_sync(struct iommu_domain *domain)
+{
+       struct gart_domain *gart_domain = to_gart_domain(domain);
+       struct gart_device *gart = gart_domain->gart;
+
+       FLUSH_GART_REGS(gart);
+}
+
 static const struct iommu_ops gart_iommu_ops = {
        .capable        = gart_iommu_capable,
        .domain_alloc   = gart_iommu_domain_alloc,
@@ -334,6 +340,8 @@ static const struct iommu_ops gart_iommu_ops = {
        .iova_to_phys   = gart_iommu_iova_to_phys,
        .pgsize_bitmap  = GART_IOMMU_PGSIZES,
        .of_xlate       = gart_iommu_of_xlate,
+       .iotlb_sync_map = gart_iommu_sync,
+       .iotlb_sync     = gart_iommu_sync,
 };
 
 static int gart_iommu_check_device(struct gart_device *gart,
-- 
2.17.0
