From: Barry Song <[email protected]>

Currently, arch_sync_dma_for_cpu() and arch_sync_dma_for_device()
always wait for the cache maintenance on each DMA buffer to
complete. That is, issuing the DMA sync operations and waiting for
their completion are done in a single API call.

For scatter-gather lists with multiple entries, this means the
issue-and-wait sequence is repeated for each entry, which can hurt
performance. Architectures such as arm64 can instead issue the
sync operations for all entries first and then wait once for their
completion.

To enable this, arch_sync_dma_for_*() now only issues the cache
maintenance operations; waiting for completion is split out into a
separate flush. On arm64, the flush is implemented with a dsb(sy)
instruction in arch_sync_dma_flush().

For now, arch_sync_dma_flush() is called right after each
arch_sync_dma_for_*() call. It is defined as a no-op on all
architectures except arm64, so this patch does not change existing
behavior. Subsequent patches will introduce true batching for SG
DMA buffers, along the lines of the sketch below.
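
As a rough illustration (not part of this patch), the intended end
state for the SG sync paths is:

	/*
	 * Issue cache maintenance for every entry first, then wait
	 * once for all of them, rather than barriering per entry.
	 */
	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
	arch_sync_dma_flush();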

Cc: Leon Romanovsky <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Ada Couprie Diaz <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Joerg Roedel <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Stefano Stabellini <[email protected]>
Cc: Oleksandr Tyshchenko <[email protected]>
Cc: Tangquan Zheng <[email protected]>
Signed-off-by: Barry Song <[email protected]>
---
 arch/arm64/include/asm/cache.h |  6 ++++++
 arch/arm64/mm/dma-mapping.c    |  4 ++--
 drivers/iommu/dma-iommu.c      | 37 +++++++++++++++++++++++++---------
 drivers/xen/swiotlb-xen.c      | 24 ++++++++++++++--------
 include/linux/dma-map-ops.h    |  6 ++++++
 kernel/dma/direct.c            |  8 ++++++--
 kernel/dma/direct.h            |  9 +++++++--
 kernel/dma/swiotlb.c           |  4 +++-
 8 files changed, 73 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index dd2c8586a725..487fb7c355ed 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -87,6 +87,12 @@ int cache_line_size(void);
 
 #define dma_get_cache_alignment        cache_line_size
 
+static inline void arch_sync_dma_flush(void)
+{
+       dsb(sy);
+}
+#define arch_sync_dma_flush arch_sync_dma_flush
+
 /* Compress a u64 MPIDR value into 32 bits. */
 static inline u64 arch_compact_of_hwid(u64 id)
 {
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b2b5792b2caa..ae1ae0280eef 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -17,7 +17,7 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 {
        unsigned long start = (unsigned long)phys_to_virt(paddr);
 
-       dcache_clean_poc(start, start + size);
+       dcache_clean_poc_nosync(start, start + size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
@@ -28,7 +28,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
        if (dir == DMA_TO_DEVICE)
                return;
 
-       dcache_inval_poc(start, start + size);
+       dcache_inval_poc_nosync(start, start + size);
 }
 
 void arch_dma_prep_coherent(struct page *page, size_t size)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c92088855450..6827763a3877 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1095,8 +1095,10 @@ void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                return;
 
        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-       if (!dev_is_dma_coherent(dev))
+       if (!dev_is_dma_coherent(dev)) {
                arch_sync_dma_for_cpu(phys, size, dir);
+               arch_sync_dma_flush();
+       }
 
        swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
@@ -1112,8 +1114,10 @@ void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        swiotlb_sync_single_for_device(dev, phys, size, dir);
 
-       if (!dev_is_dma_coherent(dev))
+       if (!dev_is_dma_coherent(dev)) {
                arch_sync_dma_for_device(phys, size, dir);
+               arch_sync_dma_flush();
+       }
 }
 
 void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
@@ -1122,13 +1126,16 @@ void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
        struct scatterlist *sg;
        int i;
 
-       if (sg_dma_is_swiotlb(sgl))
+       if (sg_dma_is_swiotlb(sgl)) {
                for_each_sg(sgl, sg, nelems, i)
                        iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
                                                      sg->length, dir);
-       else if (!dev_is_dma_coherent(dev))
-               for_each_sg(sgl, sg, nelems, i)
+       } else if (!dev_is_dma_coherent(dev)) {
+               for_each_sg(sgl, sg, nelems, i) {
                        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+                       arch_sync_dma_flush();
+               }
+       }
 }
 
 void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
@@ -1143,8 +1150,10 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                                                         sg_dma_address(sg),
                                                         sg->length, dir);
        else if (!dev_is_dma_coherent(dev))
-               for_each_sg(sgl, sg, nelems, i)
+               for_each_sg(sgl, sg, nelems, i) {
                        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+                       arch_sync_dma_flush();
+               }
 }
 
 static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
@@ -1219,8 +1228,10 @@ dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
                        return DMA_MAPPING_ERROR;
        }
 
-       if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+       if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
                arch_sync_dma_for_device(phys, size, dir);
+               arch_sync_dma_flush();
+       }
 
        iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
        if (iova == DMA_MAPPING_ERROR && !(attrs & DMA_ATTR_MMIO))
@@ -1242,8 +1253,10 @@ void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
        if (WARN_ON(!phys))
                return;
 
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev)) {
                arch_sync_dma_for_cpu(phys, size, dir);
+               arch_sync_dma_flush();
+       }
 
        __iommu_dma_unmap(dev, dma_handle, size);
 
@@ -1836,8 +1849,10 @@ static int __dma_iova_link(struct device *dev, dma_addr_t addr,
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
 
-       if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+       if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
                arch_sync_dma_for_device(phys, size, dir);
+               arch_sync_dma_flush();
+       }
 
        return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
                        prot, GFP_ATOMIC);
@@ -2008,8 +2023,10 @@ static void iommu_dma_iova_unlink_range_slow(struct device *dev,
                        end - addr, iovad->granule - iova_start_pad);
 
                if (!dev_is_dma_coherent(dev) &&
-                   !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+                   !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
                        arch_sync_dma_for_cpu(phys, len, dir);
+                       arch_sync_dma_flush();
+               }
 
                swiotlb_tbl_unmap_single(dev, phys, len, dir, attrs);
 
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ccf25027bec1..b79917e785a5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -262,10 +262,12 @@ static dma_addr_t xen_swiotlb_map_phys(struct device *dev, phys_addr_t phys,
 
 done:
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr)))) {
                        arch_sync_dma_for_device(phys, size, dir);
-               else
+                       arch_sync_dma_flush();
+               } else {
                        xen_dma_sync_for_device(dev, dev_addr, size, dir);
+               }
        }
        return dev_addr;
 }
@@ -287,10 +289,12 @@ static void xen_swiotlb_unmap_phys(struct device *hwdev, dma_addr_t dev_addr,
        BUG_ON(dir == DMA_NONE);
 
        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+               if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr)))) {
                        arch_sync_dma_for_cpu(paddr, size, dir);
-               else
+                       arch_sync_dma_flush();
+               } else {
                        xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
+               }
        }
 
        /* NOTE: We use dev_addr here, not paddr! */
@@ -308,10 +312,12 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
        struct io_tlb_pool *pool;
 
        if (!dev_is_dma_coherent(dev)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
                        arch_sync_dma_for_cpu(paddr, size, dir);
-               else
+                       arch_sync_dma_flush();
+               } else {
                        xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
+               }
        }
 
        pool = xen_swiotlb_find_pool(dev, dma_addr);
@@ -331,10 +337,12 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                __swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);
 
        if (!dev_is_dma_coherent(dev)) {
-               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+               if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr)))) {
                        arch_sync_dma_for_device(paddr, size, dir);
-               else
+                       arch_sync_dma_flush();
+               } else {
                        xen_dma_sync_for_device(dev, dma_addr, size, dir);
+               }
        }
 }
 
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 4809204c674c..e7dd8a63b40e 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -361,6 +361,12 @@ static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
+#ifndef arch_sync_dma_flush
+static inline void arch_sync_dma_flush(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
 void arch_sync_dma_for_cpu_all(void);
 #else
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 50c3fe2a1d55..a219911c7b90 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -402,9 +402,11 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 
                swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);
 
-               if (!dev_is_dma_coherent(dev))
+               if (!dev_is_dma_coherent(dev)) {
                        arch_sync_dma_for_device(paddr, sg->length,
                                        dir);
+                       arch_sync_dma_flush();
+               }
        }
 }
 #endif
@@ -421,8 +423,10 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
-               if (!dev_is_dma_coherent(dev))
+               if (!dev_is_dma_coherent(dev)) {
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);
+                       arch_sync_dma_flush();
+               }
 
                swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);
 
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index da2fadf45bcd..a69326eed266 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -60,8 +60,10 @@ static inline void dma_direct_sync_single_for_device(struct device *dev,
 
        swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
-       if (!dev_is_dma_coherent(dev))
+       if (!dev_is_dma_coherent(dev)) {
                arch_sync_dma_for_device(paddr, size, dir);
+               arch_sync_dma_flush();
+       }
 }
 
 static inline void dma_direct_sync_single_for_cpu(struct device *dev,
@@ -71,6 +73,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
 
        if (!dev_is_dma_coherent(dev)) {
                arch_sync_dma_for_cpu(paddr, size, dir);
+               arch_sync_dma_flush();
                arch_sync_dma_for_cpu_all();
        }
 
@@ -109,8 +112,10 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
        }
 
        if (!dev_is_dma_coherent(dev) &&
-           !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
+           !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
                arch_sync_dma_for_device(phys, size, dir);
+               arch_sync_dma_flush();
+       }
        return dma_addr;
 
 err_overflow:
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a547c7693135..7cdbfcdfef86 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1595,8 +1595,10 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
                return DMA_MAPPING_ERROR;
        }
 
-       if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+       if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                arch_sync_dma_for_device(swiotlb_addr, size, dir);
+               arch_sync_dma_flush();
+       }
        return dma_addr;
 }
 
-- 
2.43.0

