This change adds support for passing DMA_ATTR_SKIP_CPU_SYNC, which allows us
to avoid invoking cache line invalidation if the driver will just handle it
via a sync_for_cpu or sync_for_device call.

Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Alexander Duyck <alexander.h.du...@intel.com>
---
 arch/powerpc/kernel/dma.c |    9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index e64a601..6877e3f 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -203,6 +203,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
+
+               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+                       continue;
+
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }
 
@@ -235,7 +239,10 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             unsigned long attrs)
 {
        BUG_ON(dir == DMA_NONE);
-       __dma_sync_page(page, offset, size, dir);
+
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               __dma_sync_page(page, offset, size, dir);
+
        return page_to_phys(page) + offset + get_dma_offset(dev);
 }
 

Reply via email to