From: Barry Song <[email protected]>

Instead of performing a flush per SG entry, issue all cache maintenance
operations first and then flush once at the end. This ultimately benefits
__dma_sync_sg_for_cpu() and __dma_sync_sg_for_device(), which now pay the
flush cost once per scatterlist rather than once per entry.
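To illustrate the shape of the change (a sketch of the loop structure
only, not the literal kernel code: the paddr derivation, swiotlb calls
and coherency checks are omitted, and arch_sync_dma_flush() is assumed
to complete all previously issued maintenance operations, e.g. via a
single barrier):

	/* Before: one flush per SG entry */
	for_each_sg(sgl, sg, nents, i) {
		arch_sync_dma_for_device(paddr, sg->length, dir);
		arch_sync_dma_flush();
	}

	/* After: batch the maintenance operations, then flush once */
	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(paddr, sg->length, dir);
	arch_sync_dma_flush();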
Cc: Leon Romanovsky <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Ada Couprie Diaz <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Tangquan Zheng <[email protected]>
Signed-off-by: Barry Song <[email protected]>
---
 kernel/dma/direct.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index a219911c7b90..98bacf562ca1 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -402,12 +402,12 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 
 		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);
 
-		if (!dev_is_dma_coherent(dev)) {
+		if (!dev_is_dma_coherent(dev))
 			arch_sync_dma_for_device(paddr, sg->length,
 					dir);
-			arch_sync_dma_flush();
-		}
 	}
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_flush();
 }
 #endif
 
@@ -423,10 +423,8 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 	for_each_sg(sgl, sg, nents, i) {
 		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
-		if (!dev_is_dma_coherent(dev)) {
+		if (!dev_is_dma_coherent(dev))
 			arch_sync_dma_for_cpu(paddr, sg->length, dir);
-			arch_sync_dma_flush();
-		}
 
 		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);
 
@@ -434,8 +432,10 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 			arch_dma_mark_clean(paddr, sg->length);
 	}
 
-	if (!dev_is_dma_coherent(dev))
+	if (!dev_is_dma_coherent(dev)) {
+		arch_sync_dma_flush();
 		arch_sync_dma_for_cpu_all();
+	}
 }
 
 /*
-- 
2.43.0
