Add a no_map attribute to struct restricted_heap_attachment and struct
restricted_heap; when set, the call to dma_map_sgtable() is skipped.
This avoids trying to map a dma-buf that doesn't refer to memory
accessible by the kernel.

Signed-off-by: Jens Wiklander <jens.wiklan...@linaro.org>
---
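Not part of the patch, just for context: a heap provider whose buffers
cannot be mapped by the kernel would be expected to set the new flag
before the heap is used, roughly as sketched below. The function name
example_rheap_init() is hypothetical; only the no_map field itself is
introduced by this patch.

#include "restricted_heap.h"	/* struct restricted_heap */

/*
 * Hypothetical provider init: marking the heap as not kernel-mappable
 * makes restricted_heap_map_dma_buf() skip dma_map_sgtable() and
 * restricted_heap_unmap_dma_buf() skip dma_unmap_sgtable().
 */
static void example_rheap_init(struct restricted_heap *rheap)
{
	rheap->no_map = true;
}

Providers that leave no_map unset keep the existing mapping behaviour.
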
 drivers/dma-buf/heaps/restricted_heap.c | 17 +++++++++++++----
 drivers/dma-buf/heaps/restricted_heap.h |  2 ++
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/dma-buf/heaps/restricted_heap.c b/drivers/dma-buf/heaps/restricted_heap.c
index 8bc8a5e3f969..4bf28e3727ca 100644
--- a/drivers/dma-buf/heaps/restricted_heap.c
+++ b/drivers/dma-buf/heaps/restricted_heap.c
@@ -16,6 +16,7 @@
 struct restricted_heap_attachment {
        struct sg_table                 *table;
        struct device                   *dev;
+       bool no_map;
 };
 
 static int
@@ -54,6 +55,8 @@ restricted_heap_memory_free(struct restricted_heap *rheap, struct restricted_buf
 static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
 {
        struct restricted_buffer *restricted_buf = dmabuf->priv;
+       struct dma_heap *heap = restricted_buf->heap;
+       struct restricted_heap *rheap = dma_heap_get_drvdata(heap);
        struct restricted_heap_attachment *a;
        struct sg_table *table;
 
@@ -70,6 +73,7 @@ static int restricted_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachm
        sg_dma_mark_restricted(table->sgl);
        a->table = table;
        a->dev = attachment->dev;
+       a->no_map = rheap->no_map;
        attachment->priv = a;
 
        return 0;
@@ -92,9 +96,12 @@ restricted_heap_map_dma_buf(struct dma_buf_attachment *attachment,
        struct sg_table *table = a->table;
        int ret;
 
-       ret = dma_map_sgtable(attachment->dev, table, direction, DMA_ATTR_SKIP_CPU_SYNC);
-       if (ret)
-               return ERR_PTR(ret);
+       if (!a->no_map) {
+               ret = dma_map_sgtable(attachment->dev, table, direction,
+                                     DMA_ATTR_SKIP_CPU_SYNC);
+               if (ret)
+                       return ERR_PTR(ret);
+       }
        return table;
 }
 
@@ -106,7 +113,9 @@ restricted_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_t
 
        WARN_ON(a->table != table);
 
-       dma_unmap_sgtable(attachment->dev, table, direction, DMA_ATTR_SKIP_CPU_SYNC);
+       if (!a->no_map)
+               dma_unmap_sgtable(attachment->dev, table, direction,
+                                 DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static int
diff --git a/drivers/dma-buf/heaps/restricted_heap.h b/drivers/dma-buf/heaps/restricted_heap.h
index 7dec4b8a471b..94cc0842f70d 100644
--- a/drivers/dma-buf/heaps/restricted_heap.h
+++ b/drivers/dma-buf/heaps/restricted_heap.h
@@ -27,6 +27,8 @@ struct restricted_heap {
        unsigned long           cma_paddr;
        unsigned long           cma_size;
 
+       bool                    no_map;
+
        void                    *priv_data;
 };
 
-- 
2.34.1
